| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
from __future__ import print_function
import sys
import os
import numpy as np
import time
import regreg.api as rr
from selection.reduced_optimization.initial_soln import selection
from selection.tests.instance import logistic_instance, gaussian_instance
#from selection.reduced_optimization.random_lasso_reduced import selection_probability_random_lasso, sel_inf_random_lasso
from selection.reduced_optimization.par_random_lasso_reduced import selection_probability_random_lasso, sel_inf_random_lasso
from selection.reduced_optimization.estimator import M_estimator_approx
def randomized_lasso_trial(X,
y,
beta,
sigma,
lam,
loss ='gaussian',
randomizer='gaussian',
estimation='parametric'):
from selection.api import randomization
n, p = X.shape
if loss == "gaussian":
loss = rr.glm.gaussian(X, y)
elif loss == "logistic":
loss = rr.glm.logistic(X, y)
epsilon = 1. / np.sqrt(n)
W = np.ones(p) * lam
penalty = rr.group_lasso(np.arange(p),weights=dict(zip(np.arange(p), W)), lagrange=1.)
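# With np.arange(p) as the group labels every coefficient forms its own group, so this
# group-lasso penalty is effectively a weighted l1 (lasso) penalty with weight lam per coefficient.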
randomization = randomization.isotropic_gaussian((p,), scale=1.)
M_est = M_estimator_approx(loss, epsilon, penalty, randomization, randomizer, estimation)
M_est.solve_approx()
active = M_est._overall
active_set = np.asarray([i for i in range(p) if active[i]])
nactive = np.sum(active)
prior_variance = 10000.
noise_variance = sigma ** 2
projection_active = X[:, active].dot(np.linalg.inv(X[:, active].T.dot(X[:, active])))
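# projection_active is X_A (X_A^T X_A)^{-1}; applying its transpose to y gives the
# least-squares coefficients restricted to the selected (active) columns.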
M_1 = prior_variance * (X.dot(X.T)) + noise_variance * np.identity(n)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: <NAME>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
import matplotlib as mpl
# Attempting to get things to work for all versions of python on Travis
mpl.use('Agg')
from sidpy.hdf.hdf_utils import get_attr
sys.path.append("../../pyUSID/")
from pyUSID.io import USIDataset
from pyUSID.io.hdf_utils.model import reshape_to_n_dims, get_dimensionality
from pyUSID.io.write_utils import Dimension
from . import data_utils
skip_viz_tests = True
if sys.version_info.major == 3:
unicode = str
if sys.version_info.minor > 4:
skip_viz_tests = False
test_h5_file_path = data_utils.std_beps_path
class TestBEPS(unittest.TestCase):
def setUp(self):
data_utils.make_beps_file()
self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias']
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r')
h5_grp = self.h5_file['/Raw_Measurement/']
self.source_nd_s2f = h5_grp['n_dim_form'][()]
self.source_nd_f2s = self.source_nd_s2f.transpose(1, 0, 3, 2)
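# transpose(1, 0, 3, 2) swaps the two position axes and the two spectroscopic axes,
# converting the slow-to-fast n-dimensional form into the fast-to-slow ordering used below.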
self.h5_source = USIDataset(h5_grp['source_main'])
self.pos_dims=[]
self.spec_dims=[]
for dim_name, dim_units in zip(self.h5_source.pos_dim_labels,
get_attr(self.h5_source.h5_pos_inds, 'units')):
self.pos_dims.append(
Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
for dim_name, dim_units in zip(self.h5_source.spec_dim_labels,
get_attr(self.h5_source.h5_spec_inds, 'units')):
self.spec_dims.append(
Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
res_grp_0 = h5_grp['source_main-Fitter_000']
self.results_0_nd_s2f = res_grp_0['n_dim_form'][()]
self.results_0_nd_f2s = self.results_0_nd_s2f.transpose(1, 0, 3, 2)
self.h5_compound = USIDataset(res_grp_0['results_main'])
res_grp_1 = h5_grp['source_main-Fitter_001']
self.results_1_nd_s2f = res_grp_1['n_dim_form'][()]
self.results_1_nd_f2s = self.results_1_nd_s2f.transpose(1, 0, 3, 2)
self.h5_complex = USIDataset(res_grp_1['results_main'])
def tearDown(self):
self.h5_file.close()
os.remove(data_utils.std_beps_path)
class TestUSIDatasetReal(unittest.TestCase):
def setUp(self):
self.rev_spec = False
data_utils.make_beps_file(rev_spec=self.rev_spec)
self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias'] if self.rev_spec else ['X', 'Y', 'Bias', 'Cycle']
def tearDown(self):
os.remove(test_h5_file_path)
def get_expected_n_dim(self, h5_f):
nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
if self.rev_spec:
nd_fast_to_slow = nd_fast_to_slow.transpose(0, 1, 3, 2)
return nd_slow_to_fast, nd_fast_to_slow
class TestStringRepr(TestBEPS):
def test_string_representation(self):
usi_dset = self.h5_source
h5_main = self.h5_file[usi_dset.name]
actual = usi_dset.__repr__()
actual = [line.strip() for line in actual.split("\n")]
actual = [actual[line_ind] for line_ind in [0, 2, 4, 7, 8, 10, 11]]
expected = list()
expected.append(h5_main.__repr__())
expected.append(h5_main.name)
expected.append(get_attr(h5_main, "quantity") + " (" + get_attr(h5_main, "units") + ")")
for h5_inds in [usi_dset.h5_pos_inds, usi_dset.h5_spec_inds]:
for dim_name, dim_size in zip(get_attr(h5_inds, "labels"),
get_dimensionality(h5_inds)):
expected.append(dim_name + ' - size: ' + str(dim_size))
self.assertTrue(np.all([x == y for x, y in zip(actual, expected)]))
class TestEquality(TestBEPS):
def test_correct_USIDataset(self):
expected = USIDataset(self.h5_source)
self.assertTrue(expected == expected)
def test_correct_h5_dataset(self):
h5_main = self.h5_file[self.h5_source.name]
expected = USIDataset(h5_main)
self.assertTrue(expected == h5_main)
def test_incorrect_USIDataset(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = USIDataset(h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'])
self.assertFalse(expected == incorrect)
def test_incorrect_h5_dataset(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices']
self.assertFalse(expected == incorrect)
def test_incorrect_object(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = np.zeros(shape=(1, 2, 3, 4))
self.assertFalse(expected == incorrect)
class TestGetNDimFormExistsReal(TestUSIDatasetReal):
def test_sorted_and_unsorted(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
nd_slow_to_fast, nd_fast_to_slow = self.get_expected_n_dim(h5_f)
actual_f2s = usi_dset.get_n_dim_form(lazy=False)
self.assertTrue(np.allclose(nd_fast_to_slow, actual_f2s))
nd_form, success = reshape_to_n_dims(usi_dset, sort_dims=True)
print(nd_form.shape)
usi_dset.toggle_sorting()
actual_s2f = usi_dset.get_n_dim_form(lazy=False)
self.assertTrue(np.allclose(nd_slow_to_fast, actual_s2f))
class TestPosSpecSlicesReal(TestUSIDatasetReal):
def test_empty_dict(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({})
self.assertTrue(np.allclose(np.expand_dims(np.arange(14), axis=1), actual_spec))
self.assertTrue(np.allclose(np.expand_dims(np.arange(15), axis=1), actual_pos))
def test_non_existent_dim(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(KeyError):
_ = usi_main._get_pos_spec_slices({'blah': 4, 'X': 3, 'Y': 1})
def test_incorrect_type(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(TypeError):
_ = usi_main._get_pos_spec_slices({'X': 'fdfd', 'Y': 1})
def test_negative_index(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(ValueError):
_ = usi_main._get_pos_spec_slices({'X': -4, 'Y': 1})
def test_out_of_bounds(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(IndexError):
_ = usi_main._get_pos_spec_slices({'X': 15, 'Y': 1})
def test_one_pos_dim_removed(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
# orig_pos = np.vstack([np.tile(np.arange(5), 3), np.repeat(np.arange(3), 5)]).T
# orig_spec = np.vstack([np.tile(np.arange(7), 2), np.repeat(np.arange(2), 7)])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': 3})
# we want every fifth position starting from 3
expected_pos = np.expand_dims(np.arange(3, 15, 5), axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_one_pos_dim_sliced(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2)})
# X sliced with slice(1, 5, 2) keeps columns 1 and 3 of every row
positions = []
for row_ind in range(3):
for col_ind in range(1, 5, 2):
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_two_pos_dim_sliced(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2), 'Y': 1})
# we want columns 1 and 3 of row 1 only (Y fixed at 1)
positions = []
for row_ind in range(1, 2):
for col_ind in range(1, 5, 2):
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
import matplotlib
matplotlib.use('Agg') # use a non-interactive backend
import CoolProp
import os.path
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
tar_fil = os.path.join(web_dir, '_static', 'CoolPropLogo.png')
tar_fil_long = os.path.join(web_dir, '_static', 'CoolPropLogoLong.png')
tar_fil_long_large = os.path.join(web_dir, '_static', 'CoolPropLogoLongLarge.png')
import matplotlib
import numpy as np
import CoolProp as CP
import matplotlib.pyplot as plt
import scipy.interpolate
# Prepare the constants
Water = CP.AbstractState("HEOS", "Water")
pc = Water.keyed_output(CP.iP_critical)
Tc = Water.keyed_output(CP.iT_critical)
T_min = 200
T_max = 1000
p_max = Water.keyed_output(CP.iP_max)
p_triple = 611.657
T_triple = 273.16
# Prepare the data for the melting line
steps = 2000
TT = []
PP = list(np.logspace(np.log10(p_triple), np.log10(p_max), steps))
for p in PP:
TT.append(Water.melting_line(CP.iT, CP.iP, p))
# Zone VI
for T in np.linspace(max(TT), 355, int(steps / 10)):
TT.append(T)
theta = T / 273.31
pi = 1 - 1.07476 * (1 - theta**4.6)
p = pi * 632.4e6
PP.append(p)
# Zone VII
for T in np.linspace(355, 715, int(steps / 10)):
TT.append(T)
theta = T / 355
lnpi = 0.173683e1 * (1 - 1 / theta) - 0.544606e-1 * (1 - theta**5) + 0.806106e-7 * (1 - theta**22)
p = np.exp(lnpi) * 2216e6
PP.append(p)
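# The two loops above extend the melting line to higher pressures using standard
# melting-pressure correlations for ice VI (reference values 273.31 K, 632.4 MPa) and
# ice VII (355 K, 2216 MPa).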
# Changes number of points
steps = int(steps / 10.0)
p_melt = np.logspace(np.log10(np.min(PP)), np.log10(np.max(PP)), steps)
T_melt_f = scipy.interpolate.interp1d(np.log10(PP), TT)
#T_melt_f = scipy.interpolate.spline(np.log10(PP),TT,np.log10(p_melt))
T_melt = T_melt_f(np.log10(p_melt))
#T_melt = np.array(TT)
#p_melt = np.array(PP)
#
# Prepare the data for the saturation line
T_sat = np.linspace(T_triple, Tc, len(T_melt))
p_sat = CP.CoolProp.PropsSI('P', 'T', T_sat, 'Q', [0] * len(T_sat), 'Water')
#
# Prepare density data
TT, DD, PP = [], [], []
for T in np.linspace(T_min, T_max, steps):
for p in np.logspace(np.log10(np.min(p_melt)), np.log10(np.max(p_melt)), steps):
Tm = scipy.interpolate.interp1d(p_melt, T_melt)(p)
if T < Tm: continue
if p > p_max: pin = p_max
else: pin = p
D = CP.CoolProp.PropsSI('D', 'T', T, 'P', pin, 'Water')
TT.append(T)
DD.append(np.log10(D))
PP.append(p)
#tt = np.linspace(T_min, T_max, steps)
#pp = np.logspace(np.log10(p_triple), np.log10(p_max), steps)
#tt, pp = np.meshgrid(tt, pp)
#dd = np.empty(tt.shape)
#dd[:][:] = np.NAN
#nr,nc = tt.shape
# for i in range(nr):
# for j in range(nc):
#Tm = T_melt_f(np.log10(pp[i][j]))
# if tt[i][j] < Tm: continue
#D = CP.CoolProp.PropsSI('D','T',tt[i][j],'P',pp[i][j],'Water')
#dd[i][j] = np.log10(D)
#
# Define colours etc
lw = 3
melt_args = dict(color='orange', lw=lw, solid_capstyle='round')
sat_args = melt_args.copy()
nm = matplotlib.colors.Normalize(min(DD), max(DD))
rho_args = dict(cmap=plt.cm.get_cmap('Blues'), norm=nm)
fig = plt.figure(figsize=(1.0, 1.0))
ax = fig.add_axes((0.0, 0.0, 1.0, 1.0))
plt.plot(T_melt, p_melt, **melt_args)
plt.plot(T_sat, p_sat, **sat_args)
plt.scatter(TT, PP, c=DD, edgecolor='none', s=6, **rho_args)
#plt.contourf(tt, pp, dd, steps, **rho_args )
delta_x = np.min(T_melt)
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import ctypes
import argparse
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import json
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
def parse_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description='BERT QA Inference')
parser.add_argument('-e', '--yolo_engine', dest='yolo_engine', default='/mnt/data/yu.huang/github.com/wang-xinyu/tensorrtx/yolov5/yolov5s.wts',
help='Path to yolo TensorRT engine')
# parser.add_argument('-p', '--passage', nargs='*',
# help='Text for paragraph/passage for BERT QA',
# default='')
# parser.add_argument('-pf', '--passage-file',
# help='File containing input passage',
# default='')
# parser.add_argument('-q', '--question', nargs='*',
# help='Text for query/question for BERT QA',
# default='')
# parser.add_argument('-qf', '--question-file',
# help='File containing input question',
# default='')
# parser.add_argument('-v', '--vocab-file',
# help='Path to file containing entire understandable vocab',
# default='./pre-trained_model/uncased_L-24_H-1024_A-16/vocab.txt')
# parser.add_argument('-s', '--sequence-length',
# help='The sequence length to use. Defaults to 128',
# default=128, type=int)
args, _ = parser.parse_known_args()
return args
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
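# In this "minimum rectangle" mode only the remainder modulo 64 is kept as padding, so the
# padded shape stays a multiple of 64 (and hence of the network stride) while padding is minimised.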
dw, dh = np.mod(dw, 64), np.mod(dh, 64)
import sys
import math
import struct
import threading
import logging
import multiprocessing
from contextlib import contextmanager
import lmdb
import cv2
import numpy as np
import time
import tensorflow as tf
from tensorpack import imgaug
from tensorpack.dataflow.image import MapDataComponent, AugmentImageComponent
from tensorpack.dataflow.common import BatchData, MapData, TestDataSpeed
from tensorpack.dataflow.prefetch import PrefetchData
from tensorpack.dataflow.base import RNGDataFlow, DataFlowTerminated
from datum_pb2 import Datum
from pose_augment import pose_flip, pose_rotation, pose_to_img, pose_crop_random, \
pose_resize_shortestedge_random, pose_resize_shortestedge_fixed, pose_crop_center, pose_random_scale
import matplotlib as mpl
logging.basicConfig(level=logging.DEBUG, format='[lmdb_dataset] %(asctime)s %(levelname)s %(message)s')
class CocoMetadata:
# __coco_parts = 57
__coco_parts = 19
__coco_vecs = list(zip(
[2, 9, 10, 2, 12, 13, 2, 3, 4, 3, 2, 6, 7, 6, 2, 1, 1, 15, 16],
[9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18]
))
@staticmethod
def parse_float(four_np):
assert len(four_np) == 4
return struct.unpack('<f', bytes(four_np))[0]
@staticmethod
def parse_floats(four_nps, adjust=0):
assert len(four_nps) % 4 == 0
return [(CocoMetadata.parse_float(four_nps[x*4:x*4+4]) + adjust) for x in range(len(four_nps) // 4)]
def __init__(self, idx, img, meta, sigma):
self.idx = idx
self.img = img
self.sigma = sigma
self.height = int(CocoMetadata.parse_float(meta[1][:4]))
self.width = int(CocoMetadata.parse_float(meta[1][4:8]))
self.num_other_people = meta[2][1]
self.people_index = meta[2][2]
# self.objpos_x = CocoMetadata.parse_float(meta[3][:4]) - 1
# self.objpos_y = CocoMetadata.parse_float(meta[3][4:8]) - 1
# self.objpos = [(self.objpos_x, self.objpos_y)]
joint_list = []
joint_x = CocoMetadata.parse_floats(meta[5][:CocoMetadata.__coco_parts*4], adjust=-1)
joint_y = CocoMetadata.parse_floats(meta[6][:CocoMetadata.__coco_parts*4], adjust=-1)
joint_list.append(list(zip(joint_x, joint_y)))
for person_idx in range(self.num_other_people):
# objpos_x = CocoMetadata.parse_float(meta[8+person_idx][:4]) - 1
# objpos_y = CocoMetadata.parse_float(meta[8+person_idx][4:8]) - 1
# self.objpos.append((objpos_x, objpos_y))
joint_x = CocoMetadata.parse_floats(meta[9+self.num_other_people+3*person_idx][:CocoMetadata.__coco_parts*4], adjust=-1)
joint_y = CocoMetadata.parse_floats(meta[9+self.num_other_people+3*person_idx+1][:CocoMetadata.__coco_parts*4], adjust=-1)
joint_x = [val for val in joint_x if val >= 0 or -1000]
joint_y = [val for val in joint_y if val >= 0 or -1000]
joint_list.append(list(zip(joint_x, joint_y)))
self.joint_list = []
transform = list(zip(
[1, 6, 7, 9, 11, 6, 8, 10, 13, 15, 17, 12, 14, 16, 3, 2, 5, 4],
[1, 7, 7, 9, 11, 6, 8, 10, 13, 15, 17, 12, 14, 16, 3, 2, 5, 4]
))
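# Each (idx1, idx2) pair maps 1-based source joint indices to the 18-part ordering used here;
# when the two indices differ, the loop below takes the midpoint of the two source joints
# (for the shoulder pair (6, 7) this effectively synthesises a neck keypoint).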
for prev_joint in joint_list:
new_joint = []
for idx1, idx2 in transform:
j1 = prev_joint[idx1-1]
j2 = prev_joint[idx2-1]
if j1[0] <= 0 or j1[1] <= 0 or j2[0] <= 0 or j2[1] <= 0:
new_joint.append((-1000, -1000))
else:
new_joint.append(((j1[0] + j2[0]) / 2, (j1[1] + j2[1]) / 2))
new_joint.append((-1000, -1000))
self.joint_list.append(new_joint)
logging.debug('joint size=%d' % len(self.joint_list))
def get_heatmap(self, target_size):
heatmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width))
for joints in self.joint_list:
for idx, point in enumerate(joints):
if point[0] < 0 or point[1] < 0:
continue
CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)
heatmap = heatmap.transpose((1, 2, 0))
# background
heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)
if target_size:
heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)
return heatmap
@staticmethod
def put_heatmap(heatmap, plane_idx, center, sigma):
center_x, center_y = center
_, height, width = heatmap.shape[:3]
th = 4.6052
delta = math.sqrt(th * 2)
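# th = 4.6052 is approximately ln(100), so Gaussian contributions below about 1% of the peak
# are skipped; delta * sigma is the radius at which exp(-d / (2 * sigma^2)) reaches that cutoff.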
x0 = int(max(0, center_x - delta * sigma))
y0 = int(max(0, center_y - delta * sigma))
x1 = int(min(width, center_x + delta * sigma))
y1 = int(min(height, center_y + delta * sigma))
for y in range(y0, y1):
for x in range(x0, x1):
d = (x - center_x) ** 2 + (y - center_y) ** 2
exp = d / 2.0 / sigma / sigma
if exp > th:
continue
heatmap[plane_idx][y][x] = max(heatmap[plane_idx][y][x], math.exp(-exp))
heatmap[plane_idx][y][x] = min(heatmap[plane_idx][y][x], 1.0)
def get_vectormap(self, target_size):
vectormap = np.zeros((CocoMetadata.__coco_parts*2, self.height, self.width))
countmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width))
for joints in self.joint_list:
for plane_idx, (j_idx1, j_idx2) in enumerate(CocoMetadata.__coco_vecs):
j_idx1 -= 1
j_idx2 -= 1
center_from = joints[j_idx1]
center_to = joints[j_idx2]
if center_from[0] < -100 or center_from[1] < -100 or center_to[0] < -100 or center_to[1] < -100:
continue
CocoMetadata.put_vectormap(vectormap, countmap, plane_idx, center_from, center_to)
vectormap = vectormap.transpose((1, 2, 0))
nonzeros = np.nonzero(countmap)
for p, y, x in zip(nonzeros[0], nonzeros[1], nonzeros[2]):
if countmap[p][y][x] <= 0:
continue
vectormap[y][x][p*2+0] /= countmap[p][y][x]
vectormap[y][x][p*2+1] /= countmap[p][y][x]
if target_size:
vectormap = cv2.resize(vectormap, target_size, interpolation=cv2.INTER_AREA)
return vectormap
@staticmethod
def put_vectormap(vectormap, countmap, plane_idx, center_from, center_to, threshold=8):
_, height, width = vectormap.shape[:3]
vec_x = center_to[0] - center_from[0]
vec_y = center_to[1] - center_from[1]
min_x = max(0, int(min(center_from[0], center_to[0]) - threshold))
min_y = max(0, int(min(center_from[1], center_to[1]) - threshold))
max_x = min(width, int(max(center_from[0], center_to[0]) + threshold))
max_y = min(height, int(max(center_from[1], center_to[1]) + threshold))
norm = math.sqrt(vec_x ** 2 + vec_y ** 2)
if norm == 0:
return
vec_x /= norm
vec_y /= norm
for y in range(min_y, max_y):
for x in range(min_x, max_x):
bec_x = x - center_from[0]
bec_y = y - center_from[1]
dist = abs(bec_x * vec_y - bec_y * vec_x)
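# With a unit direction vector, |bec_x * vec_y - bec_y * vec_x| is the perpendicular distance
# from pixel (x, y) to the line through center_from along the limb; distant pixels are skipped.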
if dist > threshold:
continue
countmap[plane_idx][y][x] += 1
vectormap[plane_idx*2+0][y][x] = vec_x
vectormap[plane_idx*2+1][y][x] = vec_y
class CocoPoseLMDB(RNGDataFlow):
__valid_i = 2745
__max_key = 121745
@staticmethod
def display_image(inp, heatmap, vectmap, as_numpy=False):
if as_numpy:
mpl.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure()
a = fig.add_subplot(2, 2, 1)
a.set_title('Image')
plt.imshow(CocoPoseLMDB.get_bgimg(inp))
a = fig.add_subplot(2, 2, 2)
a.set_title('Heatmap')
plt.imshow(CocoPoseLMDB.get_bgimg(inp, target_size=(heatmap.shape[1], heatmap.shape[0])), alpha=0.5)
tmp = np.amax(heatmap, axis=2)
plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
tmp2 = vectmap.transpose((2, 0, 1))
tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)
a = fig.add_subplot(2, 2, 3)
a.set_title('Vectormap-x')
plt.imshow(CocoPoseLMDB.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
a = fig.add_subplot(2, 2, 4)
a.set_title('Vectormap-y')
plt.imshow(CocoPoseLMDB.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
if not as_numpy:
plt.show()
else:
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
fig.clear()
plt.close()
return data
@staticmethod
def get_bgimg(inp, target_size=None):
if target_size:
inp = cv2.resize(inp, target_size, interpolation=cv2.INTER_AREA)
inp = cv2.cvtColor(((inp + 1.0) * (255.0 / 2.0)).astype(np.uint8), cv2.COLOR_BGR2RGB)
return inp
def __init__(self, path, is_train=True, decode_img=True, only_idx=-1):
self.is_train = is_train
self.decode_img = decode_img
self.only_idx = only_idx
self.env = lmdb.open(path, map_size=int(1e12), readonly=True)
self.txn = self.env.begin(buffers=True)
pass
def size(self):
if self.is_train:
return CocoPoseLMDB.__max_key - CocoPoseLMDB.__valid_i
else:
return CocoPoseLMDB.__valid_i
def get_data(self):
idxs = np.arange(self.size())
if self.is_train:
idxs += CocoPoseLMDB.__valid_i
self.rng.shuffle(idxs)
else:
pass
for idx in idxs:
datum = Datum()
if self.only_idx < 0:
s = self.txn.get(('%07d' % idx).encode('utf-8'))
else:
s = self.txn.get(('%07d' % self.only_idx).encode('utf-8'))
datum.ParseFromString(s)
if isinstance(datum.data, bytes):
data = np.fromstring(datum.data, dtype=np.uint8)
"""
Author: <NAME>
Classes and functions for linearized DSGEs.
"""
import warnings
import pandas as pd
from tqdm import tqdm
from sympy import simplify, Matrix
from scipy.linalg import qz
from scipy.stats import beta, gamma, invgamma, norm, uniform
import matplotlib.pyplot as plt
from pykalman import KalmanFilter
from numpy.linalg import svd, inv, eig
from tables import PerformanceWarning
from scipy.optimize import minimize, basinhopping
from numpy.random import multivariate_normal, rand, seed
from numpy import diagonal, vstack, array, eye, where, diag, sqrt, hstack, zeros, \
arange, exp, log, inf, nan, isnan, isinf, set_printoptions, matrix, linspace
pd.set_option('display.max_columns', 20)
set_printoptions(precision=4, suppress=True, linewidth=150)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=PerformanceWarning)
class DSGE(object):
"""
This is the main class which holds a DSGE model with all its attributes and methods.
"""
chains = None
prior_info = None
has_solution = False
posterior_table = None
def __init__(self, endog, endogl, exog, expec, state_equations, obs_equations=None, estimate_params=None,
calib_dict=None, prior_dict=None, obs_data=None, verbose=False):
"""
Model declaration requires passing SymPy symbols as variables and parameters. Some arguments can be left empty
if you are working with simulations of calibrated models.
:param endog: SymPy matrix of symbols containing the endogenous variables.
:param endogl: SymPy matrix of symbols containing the lagged endogenous variables.
:param exog: SymPy matrix of symbols containing the exogenous shocks.
:param expec: SymPy matrix of symbols containing the expectational errors.
:param state_equations: SymPy matrix of symbolic expressions representing the model's equilibrium conditions,
with zeros on the right-hand side of the equality.
:param obs_equations: SymPy matrix of symbolic expressions representing the model's observation equations, with
observable variables on the left-hand side of the equation. This is only required if the
model is going to be estimated. You do not need to provide observation equations to run
simulations on a calibrated model.
:param estimate_params: SymPy matrix of symbols containing the parameters that are free to be estimated.
:param calib_dict: dict. Keys are the symbols of parameters that are going to be calibrated, and values are
their calibrated value.
:param prior_dict: dict. Entries must have symbols of parameters that are going to be estimated. Values are
dictionaries containing the following entries:
- 'dist': prior distribution. 'normal', 'beta', 'gamma' or 'invgamma'.
- 'mean': mean of the prior distribution.
- 'std': standard deviation of the prior distribution.
- 'label': str with name/representation of the estimated parameter. This argument accepts
LaTeX representations.
:param obs_data: pandas DataFrame with the observable variables. Columns must be in the same order as the
'obs_equations' declarations.
:param verbose: <not implemented yet>
"""
self.verbose = verbose
self.endog = endog
self.endogl = endogl
self.exog = exog
self.expec = expec
self.params = estimate_params
self.state_equations = state_equations
self.obs_equations = obs_equations
self.prior_dict = prior_dict
self.data = obs_data
self.n_state = len(endog)
self.n_obs = len(endog) if obs_equations is None else len(obs_equations)
self.n_param = None if estimate_params is None else len(estimate_params)
# TODO a consistency check between obs_data and obs_equations could go here
if (obs_equations is None) and (obs_data is None):
generate_obs = True
else:
generate_obs = False
self._get_jacobians(generate_obs=generate_obs)
if estimate_params is None:
# If no parameters are going to be estimated, calibrate the whole model
self.Gamma0, self.Gamma1, self.Psi, self.Pi, self.C_in, self.obs_matrix, self.obs_offset = \
self._eval_matrix(calib_dict, to_array=True)
self.G1, self.C_out, self.impact, self.fmat, self.fwt, self.ywt, self.gev, self.eu, self.loose = \
gensys(self.Gamma0, self.Gamma1, self.C_in, self.Psi, self.Pi)
# TODO assert that there are no symbols left
self.has_solution = True
else:
# Otherwise, calibrate only the required parameters
self.Gamma0, self.Gamma1, self.Psi, self.Pi, self.C_in, self.obs_matrix, self.obs_offset = \
self._eval_matrix(calib_dict, to_array=False)
self.prior_info = self._get_prior_info()
def simulate(self, n_obs=100, random_seed=None):
"""
Given a calibrate or estimated model, simulates values of the endogenous variables based on random samples of
the exogenous shocks.
:param n_obs: number of observation in the time dimension.
:param random_seed: random seed for the simulation.
:return: pandas DataFrame. 'df_obs' contains the simulations for the observable variables. 'df_state' contains
the simulations for the state/endogenous variables.
"""
# TODO if there are no observation equations, return None for 'df_obs'
assert self.has_solution, "No solution was generated yet"
if not (random_seed is None):
seed(random_seed)
kf = KalmanFilter(self.G1, self.obs_matrix, self.impact @ self.impact.T, None,
self.C_out.reshape(self.n_state), self.obs_offset.reshape(self.n_obs))
simul_data = kf.sample(n_obs)
state_names = [str(s) for s in list(self.endog)]
obs_names = [f'obs {i+1}' for i in range(self.obs_matrix.shape[0])]
df_obs = pd.DataFrame(data=simul_data[1], columns=obs_names)
df_states = pd.DataFrame(data=simul_data[0], columns=state_names)
return df_obs, df_states
def estimate(self, file_path, nsim=1000, ck=0.2):
"""
Run the MCMC estimation.
:param file_path: str. Save path where the MCMC chains are saved. The file format is HDF5 (.h5). This file
format gets very heavy but has very fast read/write speed. If the file already exists, the
estimation will resume from these previously simulated chains.
:param nsim: Length of the MCMC chains to be generated. If the chains are already stable, this is the number of
draws from the posterior distribution.
:param ck: float. Scaling factor of the hessian matrix of the mode of the posterior distribution, which is used
as the covariance matrix for the MCMC algorithm. Bayesian literature says this value needs to be
calibrated in order to achieve your desired acceptance rate from the posterior draws.
:return: the 'chains' attribute of this DSGE instance is generated.
"""
try:
df_chains = pd.read_hdf(file_path, key='chains')
sigmak = pd.read_hdf(file_path, key='sigmak')
start = df_chains.index[-1]
except FileNotFoundError:
def obj_func(theta_irr):
theta_irr = {k: v for k, v in zip(self.params, theta_irr)}
theta_res = self._irr2res(theta_irr)
return -1 * self._calc_posterior(theta_res)
theta_res0 = {k: v for k, v in zip(self.params, self.prior_info['mean'].values)}
theta_irr0 = self._res2irr(theta_res0)
theta_irr0 = array(list(theta_irr0.values()))
# Optimization - SciPy minimize
res = minimize(obj_func, theta_irr0, options={'disp': True}, method='BFGS')
theta_mode_irr = {k: v for k, v in zip(self.params, res.x)}
theta_mode_res = self._irr2res(theta_mode_irr)
sigmak = ck * res.hess_inv
if self.verbose:
print('===== Posterior Mode =====')
print(theta_mode_res, '\n')
print('===== MH jump covariance =====')
print(sigmak, '\n')
print('===== Eigenvalues of MH jump covariance =====')
print(eig(sigmak)[0], '\n')
# Optimization - Basinhoping
# res = basinhopping(obj_func, theta_irr0)
# theta_mode_irr = {k: v for k, v in zip(self.params, res.x)}
# theta_mode_res = self._irr2res(theta_mode_irr)
# sigmak = ck * res.hess_inv
# Overrides the result of the optimization
# theta_mode_res = self.prior_info['mean']
# sigmak = ck * eye(self.n_param)
df_chains = pd.DataFrame(columns=[str(p) for p in list(self.params)], index=range(nsim))
df_chains.loc[0] = list(theta_mode_res.values())
start = 0
# Metropolis-Hastings
muk = zeros(self.n_param)
accepted = 0
for ii in tqdm(range(start + 1, start+nsim), 'Metropolis-Hastings'):
theta1 = {k: v for k, v in zip(self.params, df_chains.loc[ii - 1].values)}
pos1 = self._calc_posterior(theta1)
omega1 = self._res2irr(theta1)
omega2 = array(list(omega1.values())) + multivariate_normal(muk, sigmak)
omega2 = {k: v for k, v in zip(self.params, omega2)}
theta2 = self._irr2res(omega2)
pos2 = self._calc_posterior(theta2)
ratio = exp(pos2 - pos1)
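# Random-walk Metropolis-Hastings acceptance: the proposal is accepted with probability
# min(1, exp(pos2 - pos1)); note that _calc_posterior scales the log-posterior by 1000,
# which sharpens this acceptance ratio considerably.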
if ratio > rand(1)[0]:
accepted += 1
df_chains.loc[ii] = list(theta2.values())
else:
df_chains.loc[ii] = df_chains.loc[ii - 1]
if ii % 100 == 0:
store = pd.HDFStore(file_path)
store['chains'] = df_chains
store['sigmak'] = pd.DataFrame(data=sigmak)
store.close()
store = pd.HDFStore(file_path)
store['chains'] = df_chains
store['sigmak'] = pd.DataFrame(data=sigmak)
store.close()
self.chains = df_chains.astype(float)
if self.verbose:
print('Acceptance rate:', 100 * (accepted / nsim), 'percent')
def eval_chains(self, burnin=0.3, load_chain=None, show_charts=False):
"""
:param burnin: int or float. Number of observations at the beginning of the chain that are dropped before
computing posterior statistics.
:param load_chain: str. Save path of the HDF5 file with the chains. Only required if the chains were not loaded
in the estimation step.
:param show_charts: bool. If True, the prior-posterior chart is shown. Red lines are the theoretical prior densities,
blue bars are the empirical posterior densities.
:return: the 'posterior_table' attribute of this DSGE instance is generated.
"""
# TODO Output a model calibrated with posteriors
if not (load_chain is None):
try:
self.chains = pd.read_hdf(load_chain, key='chains').astype(float)
except FileNotFoundError:
raise FileNotFoundError('Chain file not found')
assert not (self.chains is None), 'There are no loaded chains'
chain_size = self.chains.shape[0]
if type(burnin) is float and 0 <= burnin < 1:
df_chains = self.chains.iloc[int(chain_size * burnin):]
elif type(burnin) is int and burnin < chain_size:
df_chains = self.chains.iloc[burnin + 1:]
else:
raise ValueError("'burnin' must be either an int smaller than the chain size or a float between 0 and 1")
self._plot_chains(chains=df_chains, show_charts=show_charts)
self._plot_prior_posterior(chains=df_chains, show_charts=show_charts)
self.posterior_table = self._posterior_table(chains=df_chains)
def _get_jacobians(self, generate_obs):
# State Equations
self.Gamma0 = self.state_equations.jacobian(self.endog)
self.Gamma1 = -self.state_equations.jacobian(self.endogl)
self.Psi = -self.state_equations.jacobian(self.exog)
self.Pi = -self.state_equations.jacobian(self.expec)
self.C_in = simplify(self.state_equations
- self.Gamma0 @ self.endog
+ self.Gamma1 @ self.endogl
+ self.Psi @ self.exog
+ self.Pi @ self.expec)
# Obs Equation
if generate_obs:
self.obs_matrix = Matrix(eye(self.n_obs))
self.obs_offset = Matrix(zeros(self.n_obs))
else:
self.obs_matrix = self.obs_equations.jacobian(self.endog)
self.obs_offset = self.obs_equations - self.obs_matrix @ self.endog
def _calc_posterior(self, theta):
P = self._calc_prior(theta)
L = self._log_likelihood(theta)
f = P + L
return f*1000 # x1000 is here to increase precision of the posterior mode-finding algorithm.
def _calc_prior(self, theta):
prior_dict = self.prior_dict
df_prior = self.prior_info.copy()
df_prior['pdf'] = nan
for param in prior_dict.keys():
mu = df_prior.loc[str(param)]['mean']
sigma = df_prior.loc[str(param)]['std']
dist = df_prior.loc[str(param)]['distribution'].lower()
theta_i = theta[param]
# since we are going to take logs, the density function only needs the terms that depend on
# theta_i, this will help speed up the code a little and will not affect optimization output.
if dist == 'beta':
a = ((mu ** 2) * (1 - mu)) / (sigma ** 2) - mu
b = a * mu / (1 - mu)
pdf_i = (theta_i**(a - 1)) * ((1 - theta_i)**(b - 1))
elif dist == 'gamma':
a = (mu/sigma)**2
b = mu/a
pdf_i = theta_i**(a - 1) * exp(-theta_i/b)
elif dist == 'invgamma':
a = (mu/sigma)**2 + 2
b = mu * (a - 1)
pdf_i = (theta_i**(- a - 1)) * exp(-b/theta_i)
elif dist == 'uniform':
a = mu - sqrt(3) * sigma
b = 2 * mu - a
pdf_i = 1/(b - a)
else: # Normal
pdf_i = exp(-((theta_i - mu)**2)/(2 * (sigma**2)))
df_prior.loc[str(param), 'pdf'] = pdf_i
df_prior['log pdf'] = log(df_prior['pdf'].astype(float))
P = df_prior['log pdf'].sum()
return P
def _log_likelihood(self, theta):
Gamma0, Gamma1, Psi, Pi, C_in, obs_matrix, obs_offset = self._eval_matrix(theta, to_array=True)
for mat in [Gamma0, Gamma1, Psi, Pi, C_in]:
if isnan(mat).any() or isinf(mat).any():
return -inf
G1, C_out, impact, fmat, fwt, ywt, gev, eu, loose = gensys(Gamma0, Gamma1, C_in, Psi, Pi)
if eu[0] == 1 and eu[1] == 1:
# TODO add observation covariance to allow for measurement errors
kf = KalmanFilter(G1, obs_matrix, impact @ impact.T, None, C_out.reshape(self.n_state),
obs_offset.reshape(self.n_obs))
L = kf.loglikelihood(self.data)
else:
L = - inf
return L
def _eval_matrix(self, theta, to_array):
if to_array:
# state matrices
Gamma0 = array(self.Gamma0.subs(theta)).astype(float)
Gamma1 = array(self.Gamma1.subs(theta)).astype(float)
Psi = array(self.Psi.subs(theta)).astype(float)
Pi = array(self.Pi.subs(theta)).astype(float)
C_in = array(self.C_in.subs(theta)).astype(float)
# observation matrices
obs_matrix = array(self.obs_matrix.subs(theta)).astype(float)
obs_offset = array(self.obs_offset.subs(theta)).astype(float)
else:
# state matrices
Gamma0 = self.Gamma0.subs(theta)
Gamma1 = self.Gamma1.subs(theta)
Psi = self.Psi.subs(theta)
Pi = self.Pi.subs(theta)
C_in = self.C_in.subs(theta)
# observation matrices
obs_matrix = self.obs_matrix.subs(theta)
obs_offset = self.obs_offset.subs(theta)
return Gamma0, Gamma1, Psi, Pi, C_in, obs_matrix, obs_offset
def _get_prior_info(self):
prior_info = self.prior_dict
param_names = [str(s) for s in list(self.params)]
df_prior = pd.DataFrame(columns=['distribution', 'mean', 'std', 'param a', 'param b'],
index=param_names)
for param in prior_info.keys():
mu = prior_info[param]['mean']
sigma = prior_info[param]['std']
dist = prior_info[param]['dist'].lower()
if dist == 'beta':
a = ((mu ** 2) * (1 - mu)) / (sigma ** 2) - mu
b = a * mu / (1 - mu)
elif dist == 'gamma':
a = (mu / sigma) ** 2
b = mu / a
elif dist == 'invgamma':
a = (mu / sigma) ** 2 + 2
b = mu * (a - 1)
elif dist == 'uniform':
a = mu - sqrt(3) * sigma
b = 2 * mu - a
else: # Normal
a = mu
b = sigma
df_prior.loc[str(param)] = [dist, mu, sigma, a, b]
return df_prior
def _res2irr(self, theta_res):
prior_info = self.prior_info
theta_irr = theta_res.copy()
for param in theta_res.keys():
a = prior_info.loc[str(param)]['param a']
b = prior_info.loc[str(param)]['param b']
dist = prior_info.loc[str(param)]['distribution'].lower()
theta_i = theta_res[param]
if dist == 'beta':
theta_irr[param] = log(theta_i / (1 - theta_i))
elif dist == 'gamma' or dist == 'invgamma':
theta_irr[param] = log(theta_i)
elif dist == 'uniform':
theta_irr[param] = log((theta_i - a) / (b - theta_i))
else: # Normal
theta_irr[param] = theta_i
return theta_irr
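# _res2irr maps each restricted parameter to an unrestricted real value (a logit for beta and
# uniform priors, a log for gamma/invgamma, identity for normal) so the optimizer and the MH
# sampler can move freely; _irr2res below applies the inverse transformations.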
def _irr2res(self, theta_irr):
prior_info = self.prior_info
theta_res = theta_irr.copy()
for param in theta_irr.keys():
a = prior_info.loc[str(param)]['param a']
b = prior_info.loc[str(param)]['param b']
dist = prior_info.loc[str(param)]['distribution'].lower()
lambda_i = theta_irr[param]
if dist == 'beta':
theta_res[param] = exp(lambda_i) / (1 + exp(lambda_i))
elif dist == 'gamma':
theta_res[param] = exp(lambda_i)
elif dist == 'invgamma':
theta_res[param] = exp(lambda_i)
elif dist == 'uniform':
theta_res[param] = (a + b * exp(lambda_i)) / (1 + exp(lambda_i))
else: # Normal
theta_res[param] = lambda_i
return theta_res
def _plot_chains(self, chains, show_charts):
n_cols = int(self.n_param ** 0.5)
n_rows = n_cols + 1 if self.n_param > n_cols ** 2 else n_cols
subplot_shape = (n_rows, n_cols)
plt.figure(figsize=(7*1.61, 7))
for count, param in enumerate(list(self.params)):
ax = plt.subplot2grid(subplot_shape, (count // n_cols, count % n_cols))
ax.plot(chains[str(param)], linewidth=0.5, color='darkblue')
ax.set_title(self.prior_dict[param]['label'])
plt.tight_layout()
if show_charts:
plt.show()
def _plot_prior_posterior(self, chains, show_charts):
n_bins = int(sqrt(chains.shape[0]))
# coding=utf-8
# Copyright 2021 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing
# permissions and
# limitations under the License.
"""Kitting Tasks."""
import os
import numpy as np
from ravens.tasks.task import Task
from ravens.utils import utils
class AssemblingKits(Task):
"""Kitting Tasks base class."""
def __init__(self):
super().__init__()
self.max_steps = 4
self.rot_eps = np.deg2rad(30)
# CHANGE THIS WHEN YOU ADD MORE OBJS
self.train_set = [0,2,3,4,5,7,8,9,10]
self.test_set = [3,7]
self.homogeneous = False
def reset(self, env):
super().reset(env)
# Add kit.
kit_size = (0.39, 0.3, 0.0005)
kit_urdf = 'kitting/kit.urdf'
kit_pose = self.get_pose(env, kit_size)
env.add_object(kit_urdf, kit_pose, 'fixed')
if self.mode == 'train':
n_objects = 3
obj_shapes = np.random.choice(self.train_set, n_objects)
else:
if self.homogeneous:
n_objects = 2
obj_shapes = [np.random.choice(self.test_set)] * n_objects
else:
n_objects = 2
obj_shapes = np.random.choice(self.test_set, n_objects)
colors = [
utils.COLORS['purple'], utils.COLORS['blue'], utils.COLORS['green'],
utils.COLORS['yellow'], utils.COLORS['red']
]
symmetry = [
2 * np.pi, 2 * np.pi, 2 * np.pi / 3, np.pi / 2, np.pi / 2, 2 * np.pi,
np.pi, 2 * np.pi / 5, np.pi, np.pi / 2, 2 * np.pi / 5, 0, 2 * np.pi,
2 * np.pi, 2 * np.pi, 2 * np.pi, 0, 2 * np.pi / 6, 2 * np.pi, 2 * np.pi
]
# Build kit.
targets = []
if self.mode == 'test':
targ_pos = [[0, -0.05, -0.001],
[0, 0.05, -0.001]]
else:
targ_pos = [[-0.125, 0.12, -0.001],
[0.072, 0.07, -0.001],
[0.125, -0.13, -0.001]]
template = 'kitting/object-template.urdf'
for i in range(n_objects):
shape = os.path.join(self.assets_root, 'kitting',
f'{obj_shapes[i]:02d}.obj')
if obj_shapes[i] == 7:
scale = [0.006, 0.006, 0.0007]
else:
scale = [0.006, 0.006, 0.002] # .0005
pos = utils.apply(kit_pose, targ_pos[i])
if self.mode == 'train':
theta = np.random.rand()
"""This module contains Model implementations that utilize the MPNN model as their underlying
model"""
from functools import partial
import json
import logging
from pathlib import Path
from typing import Iterable, List, NoReturn, Optional, Sequence, Tuple
import warnings
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import ray
from ray.util.sgd.v2 import Trainer
import torch
from tqdm import tqdm
from molpal.models.chemprop.data.data import MoleculeDatapoint, MoleculeDataset, MoleculeDataLoader
from molpal.models.chemprop.data.scaler import StandardScaler
from molpal.models.chemprop.data.utils import split_data
from molpal.models.base import Model
from molpal.models import mpnn
from molpal.utils import batches
logging.getLogger("lightning").setLevel(logging.FATAL)
warnings.filterwarnings(
"ignore", ".*Trying to infer the `batch_size` from an ambiguous collection.*"
)
class MPNN:
"""A message-passing neural network base class
This class serves as a wrapper for the Chemprop MoleculeModel, providing
convenience and modularity in addition to uncertainty quantification
methods as originally implemented in the Chemprop confidence branch
Attributes
----------
ncpu : int
the number of cores over which to parallelize input batch preparation
ddp : bool
whether to train the model over a distributed setup. Only works with
CUDA >= 11.0
precision : int
the precision with which to train the model represented in the number
of bits
model : MoleculeModel
the underlying chemprop model on which to train and make predictions
uncertainty : Optional[str], default=None
the uncertainty quantification method the model uses. None if it
does not use any uncertainty quantification
loss_func : Callable
the loss function used in model training
batch_size : int
the size of each minibatch during training
epochs : int
the number of epochs over which to train
dataset_type : str
the type of dataset. Choices: ('regression')
TODO: add support for classification
num_tasks : int
the number of training tasks
use_gpu : bool
whether the GPU will be used.
NOTE: If a GPU is detected, it will be used. If this is undesired, set
the CUDA_VISIBLE_DEVICES environment variable to be empty
num_workers : int
the number of workers to distribute model training over. Equal to the
number of GPUs detected, or if none are available, the ratio of total
CPUs detected on the ray cluster to the number of CPUs to dedicate
to each dataloader
train_config : Dict
a dictionary containing the configuration of training variables:
learning rates, maximum epochs, validation metric, etc.
scaler : StandardScaler
a scaler to normalize target data before training and validation and
to reverse transform prediction outputs
"""
def __init__(
self,
batch_size: int = 50,
uncertainty: Optional[str] = None,
dataset_type: str = "regression",
num_tasks: int = 1,
atom_messages: bool = False,
hidden_size: int = 300,
bias: bool = False,
depth: int = 3,
dropout: float = 0.0,
undirected: bool = False,
activation: str = "ReLU",
ffn_hidden_size: Optional[int] = None,
ffn_num_layers: int = 2,
metric: str = "rmse",
epochs: int = 50,
warmup_epochs: float = 2.0,
init_lr: float = 1e-4,
max_lr: float = 1e-3,
final_lr: float = 1e-4,
ncpu: int = 1,
ddp: bool = False,
precision: int = 32,
model_seed: Optional[int] = None,
):
self.ncpu = ncpu
self.ddp = ddp
if precision not in (16, 32):
raise ValueError(f'arg: "precision" can only be (16, 32)! got: {precision}')
self.precision = precision
self.model = mpnn.MoleculeModel(
uncertainty=uncertainty,
dataset_type=dataset_type,
num_tasks=num_tasks,
atom_messages=atom_messages,
hidden_size=hidden_size,
bias=bias,
depth=depth,
dropout=dropout,
undirected=undirected,
activation=activation,
ffn_hidden_size=ffn_hidden_size,
ffn_num_layers=ffn_num_layers,
)
self.uncertainty = uncertainty
self.dataset_type = dataset_type
self.num_tasks = num_tasks
self.epochs = epochs
self.batch_size = batch_size
self.scaler = None
ngpu = int(ray.cluster_resources().get("GPU", 0))
if ngpu > 0:
self.use_gpu = True
self._predict = ray.remote(num_cpus=ncpu, num_gpus=1)(mpnn.predict)
self.num_workers = ngpu
else:
self.use_gpu = False
self._predict = ray.remote(num_cpus=ncpu)(mpnn.predict)
self.num_workers = int(ray.cluster_resources()["CPU"] // self.ncpu)
self.seed = model_seed
if model_seed is not None:
torch.manual_seed(model_seed)
self.train_config = {
"model": self.model,
"uncertainty": self.uncertainty,
"dataset_type": dataset_type,
"batch_size": self.batch_size,
"warmup_epochs": warmup_epochs,
"max_epochs": self.epochs,
"init_lr": init_lr,
"max_lr": max_lr,
"final_lr": final_lr,
"metric": metric,
}
def train(self, smis: Iterable[str], targets: Sequence[float]) -> bool:
"""Train the model on the inputs SMILES with the given targets"""
train_data, val_data = self.make_datasets(smis, targets)
if self.ddp:
self.train_config["train_data"] = train_data
self.train_config["val_data"] = val_data
trainer = Trainer("torch", self.num_workers, self.use_gpu, {"CPU": self.ncpu})
trainer.start()
results = trainer.run(mpnn.sgd.train_func, self.train_config)
trainer.shutdown()
self.model = results[0]
return True
train_dataloader = MoleculeDataLoader(
dataset=train_data, batch_size=self.batch_size, num_workers=self.ncpu, pin_memory=False
)
val_dataloader = MoleculeDataLoader(
dataset=val_data, batch_size=self.batch_size, num_workers=self.ncpu, pin_memory=False
)
lit_model = mpnn.LitMPNN(self.train_config)
callbacks = [
EarlyStopping("val_loss", patience=10, mode="min"),
mpnn.EpochAndStepProgressBar(),
]
trainer = pl.Trainer(
logger=False,
max_epochs=self.epochs,
callbacks=callbacks,
gpus=1 if self.use_gpu else 0,
precision=self.precision,
enable_model_summary=False,
# log_every_n_steps=len(train_dataloader)
)
trainer.fit(lit_model, train_dataloader, val_dataloader)
return True
def make_datasets(
self, xs: Iterable[str], ys: np.ndarray
) -> Tuple[MoleculeDataset, MoleculeDataset]:
"""Split xs and ys into train and validation datasets"""
if len(ys.shape) == 1:
data = MoleculeDataset(
[MoleculeDatapoint(smiles=[x], targets=[y]) for x, y in zip(xs, ys)]
)
else:
data = MoleculeDataset(
[MoleculeDatapoint(smiles=[x], targets=y) for x, y in zip(xs, ys)]
)
train_data, val_data, _ = split_data(data, sizes=(0.8, 0.2, 0.0), seed=self.seed)
self.scaler = train_data.normalize_targets()
val_data.scale_targets(self.scaler)
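# Targets are standardized with statistics from the training split only; the same scaler is
# applied to the validation split here and is later passed to the prediction workers so that
# model outputs can be mapped back to the original target scale.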
return train_data, val_data
def predict(self, smis: Iterable[str]) -> np.ndarray:
"""Generate predictions for the inputs xs
Parameters
----------
smis : Iterable[str]
the SMILES strings for which to generate predictions
Returns
-------
np.ndarray
the array of predictions with shape NxO, where N is the number of
inputs and O is the number of tasks."""
model = ray.put(self.model)
scaler = ray.put(self.scaler)
refs = [
self._predict.remote(
model,
smis,
self.batch_size,
self.ncpu,
self.uncertainty,
scaler,
self.use_gpu,
True,
)
for smis in batches(smis, 20000)
]
preds_chunks = [ray.get(r) for r in tqdm(refs, "Prediction", unit="chunk", leave=False)]
return np.concatenate(preds_chunks)
def save(self, path) -> str:
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
model_path = f"{path}/model.pt"
torch.save(self.model.state_dict(), model_path)
state_path = f"{path}/state.json"
try:
state = {
"model_path": model_path,
"means": self.scaler.means.tolist(),
"stds": self.scaler.stds.tolist(),
}
except AttributeError:
state = {"model_path": model_path}
json.dump(state, open(state_path, "w"), indent=4)
return state_path
def load(self, path):
state = json.load(open(path, "r"))
self.model.load_state_dict(torch.load(state["model_path"]))
try:
self.scaler = StandardScaler(state["means"], state["stds"])
except KeyError:
pass
class MPNModel(Model):
"""Message-passing model that learns feature representations of inputs and
passes these inputs to a feed-forward neural network to predict means"""
def __init__(
self,
test_batch_size: Optional[int] = 1000000,
ncpu: int = 1,
ddp: bool = False,
precision: int = 32,
model_seed: Optional[int] = None,
**kwargs,
):
test_batch_size = test_batch_size or 1000000
self.build_model = partial(
MPNN, ncpu=ncpu, ddp=ddp, precision=precision, model_seed=model_seed
)
self.model = self.build_model()
super().__init__(test_batch_size, **kwargs)
@property
def provides(self):
return {"means"}
@property
def type_(self):
return "mpn"
def train(
self, xs: Iterable[str], ys: Sequence[float], *, retrain: bool = False, **kwargs
) -> bool:
if retrain:
self.model = self.build_model()
return self.model.train(xs, ys)
def get_means(self, xs: Sequence[str]) -> np.ndarray:
preds = self.model.predict(xs)
return preds[:, 0] # assume single-task
def get_means_and_vars(self, xs: List) -> NoReturn:
raise TypeError("MPNModel cannot predict variance!")
def save(self, path) -> str:
return self.model.save(path)
def load(self, path):
self.model.load(path)
class MPNDropoutModel(Model):
"""Message-passing network model that predicts means and variances through
stochastic dropout during model inference"""
def __init__(
self,
test_batch_size: Optional[int] = 1000000,
dropout: float = 0.2,
dropout_size: int = 10,
ncpu: int = 1,
ddp: bool = False,
precision: int = 32,
model_seed: Optional[int] = None,
**kwargs,
):
test_batch_size = test_batch_size or 1000000
self.build_model = partial(
MPNN,
uncertainty="dropout",
dropout=dropout,
ncpu=ncpu,
ddp=ddp,
precision=precision,
model_seed=model_seed,
)
self.model = self.build_model()
self.dropout_size = dropout_size
super().__init__(test_batch_size, **kwargs)
@property
def type_(self):
return "mpn"
@property
def provides(self):
return {"means", "vars", "stochastic"}
def train(
self, xs: Iterable[str], ys: Sequence[float], *, retrain: bool = False, **kwargs
) -> bool:
if retrain:
self.model = self.build_model()
return self.model.train(xs, ys)
def get_means(self, xs: Sequence[str]) -> np.ndarray:
predss = self._get_predictions(xs)
return np.mean(predss, axis=1)
from flask import Flask
from flask import request
from datetime import datetime
import os
import shutil
from PIL import Image
import numpy as np
class ImgServ():
def __init__(self, handler, host = '0.0.0.0', port_number = 12222, save_loc = '.', incoming_fname = 'image.jpg', image_prefix = 'img_', image_format = '.jpg', datetime_delimiter = '_'):
self.handler = handler
self.host = host
self.port_number = port_number
self.app = Flask(__name__)
self.save_loc = save_loc
self.incoming_fname = incoming_fname
self.image_prefix = image_prefix
self.image_format = image_format
self.datetime_delimiter = datetime_delimiter
def imgToNp(imgPath, dtype='int32'):
image = Image.open(imgPath)
image.load()
npArr = np.asarray(image, dtype=dtype)
import numpy as np
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
SMALL_NUMBER = 1e-6
# Some large int number. May be increased here, if needed.
LARGE_INTEGER = 100000000
# Min and Max outputs (clipped) from an NN-output layer interpreted as the
# log(x) of some x (e.g. a stddev of a normal
# distribution).
MIN_LOG_NN_OUTPUT = -20
MAX_LOG_NN_OUTPUT = 2
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return np.where(
np.abs(x) < delta,
np.power(x, 2.0) * 0.5,
delta * (np.abs(x) - 0.5 * delta))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 17:14:28 2021
*V5-
@author: akel
Builds the MT3D model and mesh: MT3D(input, opt='string')
input --> dictionary containing the simulation information, the geological model and the meshgrid.
See the loadmigeo function for more information.
opt ---> string that defines the mesh type used, 'tensor' or 'octree';
if opt is not given, the code runs with the tensor mesh.
Functions used
easymodelbox Creates a 3D structure with a given conductivity
easymodellayer Creates flat layers
layermesh Meshes the layers
boxmesh Meshes the box-type structures
runMT Runs the MT simulation
*V2- version 2 adapted to run with simpeg2020. No octree mesh!
*V3- in progress, adaptation to run octree
*V4- Adaptations/fixes/improvements of the octree mesh for layers
*V5- Added the MT simulator (freq)
**V5.5- Add MT offset option
**V6- Added topography
"""
#import SimPEG as simpeg
import numpy as np
import time
import discretize
from SimPEG import utils
from SimPEG.electromagnetics import natural_source as NSEM
from discretize.utils import mkvc, refine_tree_xyz
from discretize.utils import sdiag
from scipy.constants import mu_0, epsilon_0 as eps_0
from scipy.interpolate import griddata as gd
t = time.time()
print('start NMT3D : ', time.ctime())
def modelmesh(input_var,**kwargs):
op=kwargs.get('opt') #tensor
lv=kwargs.get('level') #grau de refinemanto
if lv==None:
lv=1
pass
dx=input_var['dxdydz'][0]
dy=input_var['dxdydz'][1]
dz=input_var['dxdydz'][2]
x_length = input_var['x'] # domain size in x
y_length = input_var['y'] # domain size in y
z_length = input_var['z'] # domain size in z
# Mesh definitions
# # Compute number of base mesh cells required in x and y
nbcx = 2**int(np.round(np.log(x_length/dx)/np.log(2.)))
nbcy = 2**int(np.round(np.log(y_length/dy)/np.log(2.)))
nbcz = 2**int(np.round(np.log(z_length/dz)/np.log(2.)))
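# Round the number of base cells in each direction to the nearest power of two, as required
# for the base mesh of an octree (TreeMesh) discretization.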
hx = [(dx, nbcx)]
hy = [(dy, nbcy)]
hz = [(dz, nbcz)]
if op == None :
M = discretize.TreeMesh([hx, hy, hz], x0='CCC')
layermesh(M,input_var['layer'],lv,opt='S')
if 'box' in input_var:
boxmesh(M,input_var['box'],lv)
pass
M.finalize()
if op == 'tensor':
#M=discretize.TensorMesh([hx, hy,hz], x0=['C', 'C','C'])
hx = [(200, 8, -1.5), (500.0, 40), (200, 8, 1.5)]
hy = [(100, 10, -1.5), (500.0, 80), (100, 10, 1.5)]
hz = [(200, 10, -1.6), (2500.0, 22), (500, 10,1.4)]
M = discretize.TensorMesh([
hx,
hy,
hz,],x0=["C", "C",-120000])
pass
##"Contrução" do modelo (add a condutividade )
sig=np.zeros(M.nC) + 1.0e-18 # define
# inclusão de camadas, se for o caso
if 'layer' in input_var:
print('Add layers')
M,sigBG,sigBG1d=easymodellayer(M,sig,input_var['layer'],input_var['cond'],opt='S')
pass
if 'topofile' in input_var:
print('Building topography')
M=discretize.TreeMesh([hx, hy, hz], x0='CCC')
M,xyz,Zo=loadtopo(M,input_var['topofile'],dx)
if 'box' in input_var:
boxmesh(M,input_var['box'],2)
pass
M.finalize()
sigBG=np.zeros(M.nC) + input_var['cond'][1] #geology
# print('k-->',M.nC)
actv = utils.surface2ind_topo(M, xyz)
print('surface2ind_topo',time.ctime())
actv_ocean=np.invert(actv)
print('np.invert',time.ctime())
index_ocean=np.where(actv_ocean)
print('np.where',time.ctime())
sigBG[index_ocean]=input_var['cond'][0] #ocean
sigBG[(M.gridCC[:,2] > 0) ] =1.0e-18 #atm
        # 1D mesh (for the background model) # test
mesh1d = discretize.TensorMesh([M.hz], np.array([M.x0[2]]))
sigBG1d = np.zeros(mesh1d.nC) + 0
sigBG1d[mesh1d.gridCC > 0] = 1e-18 #atm
sigBG1d[mesh1d.gridCC < 0] = input_var['cond'][0] #ocean
sigBG1d[mesh1d.gridCC < Zo] = 1.0e-1 #final layer
print('endtopfile',time.ctime())
pass
    #=====
    # include sigBG1d here and add it to the return below
    #==================
    # add structures (boxes), if requested
if 'box' in input_var:
print('Add geologic body')
M,sig=easymodelbox(M,sigBG,input_var['box'])
pass
return M,sig,sigBG,sigBG1d
#add sigBG1d ??
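# A minimal sketch of the input_var dictionary consumed by modelmesh (hypothetical values;
# the real dictionary comes from the loadmigeo function mentioned in the docstring):
#   input_var = {
#       'dxdydz': [100., 100., 100.],            # base cell sizes in x, y, z (m)
#       'x': 60000., 'y': 60000., 'z': 40000.,   # domain extents (m)
#       'layer': [1000., 5000.],                 # layer bottom depths (m)
#       'cond': [3.3, 1e-2, 1e-1],               # one conductivity per layer plus the basement (S/m)
#       'box': [0., 2000., 0., 2000., -3000., 1000., 1.0],  # x, Lx, y, Ly, z, Lz, sigma
#   }
#   M, sig, sigBG, sigBG1d = modelmesh(input_var, opt=None, level=1)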
def easymodelbox(M,S,B):
print('Build Blocks')
modelBG=S
n_box=len(B)/7
for i in range(0,int(n_box),1):
x=B[0+int(i*7)]
Lx=B[1+int(i*7)]
y=B[2+int(i*7)]
Ly=B[3+int(i*7)]
z=B[4+int(i*7)]
Lz=B[5+int(i*7)]
aim_cond=B[6+int(i*7)]
modelBG = utils.model_builder.addBlock(
M.gridCC, modelBG, [x, y, z], [x+Lx, y+Ly, z+Lz],aim_cond)
S= utils.mkvc(modelBG)
return M,S
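# Usage sketch for easymodelbox (hypothetical values): every box contributes 7 entries to B,
# ordered [x, Lx, y, Ly, z, Lz, conductivity], so two boxes need a 14-element list:
#   B = [0., 2000., 0., 2000., -3000., 1000., 1.0,
#        5000., 1000., 0., 1000., -2000., 500., 0.01]
#   M, sig = easymodelbox(M, sig, B)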
# function that creates the layers
def easymodellayer(M,S,camada,cond,**kwargs):
op=kwargs.get('opt')
if op=='B':
print('Model do tipo box')
S[M.gridCC[:, 2] >= 0] = 1.0e-18 # cond. ar
c=0
for i in range(0,len(camada),1):
c=0+c
S[(M.gridCC[:,2] < -c) & (M.gridCC[:,2] >= -camada[i])]=cond[i]
c=camada[i]
S[(M.gridCC[:,2] < -c) ]=cond[i+1]
pass
if op=='S':
print('Building Layers')
print('Model do tipo surface')
M1d = discretize.TensorMesh([M.hz], np.array([M.x0[2]]))
sigBG1d = np.zeros(M1d.nC)
sigBG1d[M1d.gridCC > 0] = 1.0e-18 #cond.ar
X=M.gridCC[:,0];
Z=np.zeros(len(X))
S[(M.gridCC[:,2] < Z) ] = 1.0e-18 #cond. ar
c=0
for i in range(0,len(camada),1):
c=0+c
S[(M.gridCC[:,2] < -c) & (M.gridCC[:,2] >= -camada[i])]=cond[i]
sigBG1d[(M1d.gridCC < -c) & (M1d.gridCC >= -camada[i])]=cond[i]
c=camada[i]
S[(M.gridCC[:,2] < -c) ]=cond[i+1]
sigBG1d[(M1d.gridCC < -c) ]=cond[i+1]
print('E. conductivity',c,cond[i+1])
pass
return M,S,sigBG1d
def layermesh(M,camada,lv,**kwargs):
    lv=2 # layers fixed at level 2; comment this out to use the same level as the boxes
op=kwargs.get('opt')
if op=='B':
print('Mesh do tipo box')
#z=0
xp, yp, zp = np.meshgrid( [-np.sum(M.h[0])/2, np.sum(M.h[0])/2],
[-np.sum(M.h[1])/2,np.sum(M.h[1])/2],
[-0-1*M.h[2][0],-0+1*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)]
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box',
finalize=False)
        # add the layers
for i in range(0,len(camada),1):
xp, yp, zp = np.meshgrid( [-np.sum(M.h[0])/2, np.sum(M.h[0])/2],
[-np.sum(M.h[1])/2,np.sum(M.h[1])/2],
[-camada[i]-1*M.h[2][0],
-camada[i]+1*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)]
M = refine_tree_xyz(M, xyz, octree_levels=[lv,lv,lv], method='box',
finalize=False)
pass
if op=='S':
print('Mesh do tipo surface!!')
        dc=3 # shrinks the extent of the layer refinement line
        xx = np.arange(-np.sum(M.h[0])/dc,np.sum(M.h[0])/dc,M.h[0][0]) # np.sum(Msurf.hxh2) or dg --> gives the borders
yy = np.arange(-np.sum(M.h[0])/dc,np.sum(M.h[0])/dc,M.h[0][0])
xx, yy = np.meshgrid(xx, yy)
zz=np.zeros([len(xx),len(xx)])-0
        # surface function
xyz = np.c_[mkvc(xx), mkvc(yy),mkvc(zz)]
M = refine_tree_xyz(M, xyz, octree_levels=[lv,lv,lv], method='surface', finalize=False)
        # add the layers
for i in range(0,len(camada),1):
zz=np.zeros([len(xx),len(xx)])-camada[i]
xyz = np.c_[mkvc(xx), mkvc(yy),mkvc(zz)]
M = refine_tree_xyz(M, xyz, octree_levels=[lv,lv,lv], method='surface', finalize=False)
pass
return
def boxmesh(M,box,lv):
# lv=2
n_box=len(box)/7
    fb=4 # discretization (padding) factor around the box
for i in range(0,int(n_box),1):
x1=box[0+int(i*7)]
x2=x1+box[1+int(i*7)]
y1=box[2+int(i*7)]
y2=y1+box[3+int(i*7)]
z1=box[4+int(i*7)]
z2=z1+box[5+int(i*7)]
        # plane 1: XY - z bottom
xp, yp, zp = np.meshgrid( [x1, x2],[y1,y2], [z1-fb*M.h[2][0],z1+fb*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)]
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box', finalize=False
)
        # plane 2: XY - z top
xp, yp, zp = np.meshgrid( [x1,x2],[y1,y2], [z2-fb*M.h[2][0],z2+fb*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)]
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box', finalize=False
)
        # plane 3: XZ - y left
xp, yp, zp = np.meshgrid( [x1-fb*M.h[0][0],x1+fb*M.h[0][0]],[y1-fb*np.sqrt(3)*M.h[1][0],y2+fb*np.sqrt(3)*M.h[1][0]], [z2+fb*np.sqrt(3)*M.h[2][0],z1-fb*np.sqrt(3)*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)]
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box', finalize=False
)
        # plane 4: XZ - y right
xp, yp, zp = np.meshgrid( [x2-fb*M.h[0][0],x2+fb*M.h[0][0]],[y1-fb*np.sqrt(3)*M.h[1][0],y2+fb*np.sqrt(3)*M.h[1][0]], [z2+fb*np.sqrt(3)*M.h[2][0],z1-fb*np.sqrt(3)*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)] # mkvc creates vectors
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box', finalize=False
)
        # plane 5: YZ - x left
xp, yp, zp = np.meshgrid( [x1,x2],[y1-fb*M.h[1][0],y1+fb*M.h[1][0]], [z2+fb*np.sqrt(3)*M.h[2][0],z1-fb*np.sqrt(3)*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)] # mkvc creates vectors
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box', finalize=False
)
        # plane 6: YZ - x right
xp, yp, zp = np.meshgrid( [x1,x2],[y2-fb*M.h[1][0],y2+fb*M.h[1][0]], [z2+fb*np.sqrt(3)*M.h[2][0],z1-fb*np.sqrt(3)*M.h[2][0]])
xyz = np.c_[mkvc(xp), mkvc(yp),mkvc(zp)] # mkvc creates vectors
M = refine_tree_xyz(
M, xyz, octree_levels=[lv,lv,lv], method='box', finalize=False
)
return
def pyvista_view(input_var,ftr):
import pyvista as pv
dx=input_var['dxdydz'][0]
dy=input_var['dxdydz'][1]
dz=input_var['dxdydz'][2]
    x_length = input_var['x']    # domain size in x
    y_length = input_var['y']    # domain size in y
    z_length = input_var['z']    # domain size in z
    # Mesh definitions
    # Compute number of base mesh cells required in x, y and z
nbcx = 2**int(np.round(np.log(x_length/dx)/np.log(2.)))
nbcy = 2**int(np.round(np.log(y_length/dy)/np.log(2.)))
nbcz = 2**int(np.round(np.log(z_length/dz)/np.log(2.)))
hx = [(dx, nbcx)]
hy = [(dy, nbcy)]
hz = [(dz, nbcz/2)]
M=discretize.TensorMesh([hx, hy,hz], x0=['C', 'C', -dz*nbcz/2])
sig=np.zeros(M.nC) + 1e-18 # define
    # add layers, if requested
if 'layer' in input_var:
easymodellayer(M,sig,input_var['layer'],input_var['cond'],opt='S')
pass
sigBG = sig
    # add structures (boxes), if requested
if 'box' in input_var:
easymodelbox(M,sigBG,input_var['box'])
pass
models = {'res':np.log10(sig)}
dataset = M.toVTK(models)
p = pv.Plotter(notebook=0)
p.show_grid(location='outer')
#
#
p.add_mesh(dataset.slice('x'), opacity=0.75, name='x-slice')
p.add_mesh(dataset.slice('y'), opacity=0.75, name='y-slice')
p.add_mesh(dataset.slice('z'), opacity=0.75, name='z-slice')
# p.add_mesh(threshed, name='vol')
p.add_mesh(dataset.threshold([np.log10(ftr)-0.1,np.log10(ftr)]), name='vol')
p.show()
#
return
def runMT(M,S,Sbg,Sbg1d,fq):
try:
from pymatsolver import Pardiso as Solver
except:
from SimPEG import Solver
nFreq = len(fq)
rx_x = np.array([0.])
rx_y = np.array([0.])
rx_loc = np.hstack((utils.mkvc(rx_x, 2), utils.mkvc(rx_y, 2), np.zeros((np.prod(rx_x.shape), 1))))
# Receivers
rxList = []
for rx_orientation in ['xx', 'xy', 'yx', 'yy']:
rxList.append(NSEM.Rx.Point_impedance3D(rx_loc, rx_orientation, 'real'))
rxList.append(NSEM.Rx.Point_impedance3D(rx_loc, rx_orientation, 'imag'))
for rx_orientation in ['zx', 'zy']:
rxList.append(NSEM.Rx.Point_tipper3D(rx_loc, rx_orientation, 'real'))
rxList.append(NSEM.Rx.Point_tipper3D(rx_loc, rx_orientation, 'imag'))
# Source list,
srcList = []
for freq in fq:
srcList.append(NSEM.Src.Planewave_xy_1Dprimary(rxList, freq, Sbg1d, Sbg))
# Make the survey
survey = NSEM.Survey(srcList)
# Set the problem
problem = NSEM.Problem3D_ePrimSec(M, sigma=S, sigmaPrimary=Sbg)
problem.pair(survey)
problem.Solver = Solver
# Calculate the data
fields = problem.fields()
# Calculate the data
fields = problem.fields() # returns secondary field
rec_x = np.array([-200]) #M.getTensor('Ex')[0]
rec_y = np.zeros(np.prod(rec_x.shape))
rec_z = np.zeros(np.prod(rec_x.shape))
pos_rx = np.hstack((utils.mkvc(rec_x,2),utils.mkvc(rec_y,2),utils.mkvc(rec_z,2)))
grid_field_px = np.empty((M.nE,nFreq),dtype=complex)
grid_field_py = np.empty((M.nE,nFreq),dtype=complex)
for i in range(nFreq):
grid_field_px[:,i] = np.transpose(fields._getField('e_pxSolution', i))
grid_field_py[:,i] = np.transpose(fields._getField('e_pySolution', i))
    # E and H fields computed on every edge of the mesh
e_px_full = fields._e_px(grid_field_px, srcList)
e_py_full = fields._e_py(grid_field_py, srcList)
h_px_full = fields._b_px(grid_field_px, srcList)/mu_0
h_py_full = fields._b_py(grid_field_py, srcList)/mu_0
    # Interpolate the fields at the receiver locations
Pex = M.getInterpolationMat(pos_rx,'Ex')
ex_px = Pex*e_px_full
ex_py = Pex*e_py_full
Pey = M.getInterpolationMat(pos_rx,'Ex')
ey_px = Pey*e_px_full
ey_py = Pey*e_py_full
Pbx = M.getInterpolationMat(pos_rx,'Fx')
hx_px = Pbx*h_px_full
hx_py = Pbx*h_py_full
Pby = M.getInterpolationMat(pos_rx,'Fy')
hy_px = Pby*h_px_full
hy_py = Pby*h_py_full
hd = sdiag(mkvc(1.0/ (sdiag(mkvc(hx_px,2)) * mkvc(hy_py,2) - sdiag(mkvc(hx_py,2)) * mkvc(hy_px,2)),2))
orientation = 'xy'
if "xx" in orientation:
Zij = ex_px * hy_py - ex_py * hy_px
elif "xy" in orientation:
Zij = -ex_px * hx_py + ex_py * hx_px
elif "yx" in orientation:
Zij = ey_px * hy_py - ey_py * hy_px
elif "yy" in orientation:
Zij = -ey_px * hx_py + ey_py * hx_px
# Calculate the complex value
Imped = hd * mkvc(Zij,2)
rho_app = np.empty((nFreq),dtype=float)
phs = np.empty((nFreq),dtype=float)
for i in range(nFreq):
print("freq:", fq[i])
rho_app[i] = 1/(2*np.pi*fq[i]*mu_0) * abs(Imped[i])**2
phs[i] = np.arctan2(Imped[i].imag, Imped[i].real)*(180./np.pi)
return fq,rho_app,phs
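# The loop above applies the standard MT relations, written here as a short sketch
# (Z is the impedance element picked by `orientation`, f the frequency in Hz):
#   rho_a(f) = |Z|**2 / (2*pi*f*mu_0)          # apparent resistivity (Ohm.m)
#   phi(f)   = atan2(Im Z, Re Z) * 180/pi      # phase (degrees)
# As a sanity check, a homogeneous half-space of resistivity R should return
# rho_a ~ R and phi ~ 45 degrees at every frequency.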
def loadtopo(M,filename,delta):
tp=np.loadtxt(filename);
lv=2
X=tp[:,0];
Y=tp[:,1];
Z=tp[:,2];
LX=max(X)-(min(X))
LY=max(Y)-(min(Y))
dxi=delta
dyi=delta
    nxi=LX/dxi # number of cells in x
    nyi=LY/dyi # number of cells in y
xi = np.linspace(min(X), max(X), int(nxi))
yi = np.linspace(min(Y), max(Y), int(nyi))
    xi, yi = np.meshgrid(xi, yi)
import logging
logging.disable(logging.CRITICAL)
import numpy as np
import copy
import time as timer
import torch
import torch.nn as nn
from torch.autograd import Variable
# samplers
import mjrl.samplers.trajectory_sampler as trajectory_sampler
import mjrl.samplers.batch_sampler as batch_sampler
# utility functions
import mjrl.utils.process_samples as process_samples
from mjrl.utils.logger import DataLog
import pickle
import os
class BatchREINFORCEFTW:
def __init__(self, all_env, policy, all_baseline,
learn_rate=0.01,
seed=None,
save_logs=False,
new_col_mode='regularize'):
self.all_env = all_env
self.policy = policy
self.all_baseline = all_baseline
self.theta = learn_rate
self.seed = seed
        self.save_logs = save_logs
        self.running_score = {}
        self.observed_tasks = set()  # needed by set_task/test_tasks below (was missing here)
        if save_logs: self.logger = {}
self.d = policy.model.L.shape[0]
self.A = np.zeros(( self.d * self.policy.k, self.d * self.policy.k))
self.B = np.zeros((self.d * self.policy.k, 1))
self.theta = {}
self.grad = {}
self.hess = {}
self.new_col_mode = new_col_mode
def set_task(self, task_id):
self.env = self.all_env[task_id]
self.policy.set_task(task_id)
self.baseline = self.all_baseline[task_id]
if task_id not in self.observed_tasks:
if self.save_logs: self.logger[task_id] = DataLog()
self.observed_tasks.add(task_id)
def CPI_surrogate(self, observations, actions, advantages):
adv_var = Variable(torch.from_numpy(advantages).float(), requires_grad=False)
old_dist_info = self.policy.old_dist_info(observations, actions)
new_dist_info = self.policy.new_dist_info(observations, actions)
LR = self.policy.likelihood_ratio(new_dist_info, old_dist_info)
surr = torch.mean(LR*adv_var)
return surr
def kl_old_new(self, observations, actions):
old_dist_info = self.policy.old_dist_info(observations, actions)
new_dist_info = self.policy.new_dist_info(observations, actions)
mean_kl = self.policy.mean_kl(new_dist_info, old_dist_info)
return mean_kl
def flat_vpg(self, observations, actions, advantages):
A = Variable(torch.from_numpy(self.A).float(), requires_grad=False)
B = Variable(torch.from_numpy(self.B).float(), requires_grad=False)
vecLT = self.policy.model.L.reshape((-1,1)) # because of the way torch orders upon reshape, this is equivalent to vec(L^\top)
cpi_surr = self.CPI_surrogate(observations, actions, advantages)
if self.policy.model.T > self.policy.k: # regularize S
objective = cpi_surr - 1e-5*torch.norm(self.policy.trainable_params[1], 1)
else: # regularize nothing (equivalent to training STL)
objective = cpi_surr
vpg_grad = torch.autograd.grad(objective, self.policy.trainable_params)#, retain_graph=True)
vpg_grad = np.concatenate([g.contiguous().view(-1).data.numpy() for g in vpg_grad])
return vpg_grad
def grad_and_hess(self, observations, actions, advantages, theta):
cpi_surr = self.CPI_surrogate(observations, actions, advantages)
objective = cpi_surr
vpg_grad = torch.autograd.grad(objective, [theta], create_graph=True)[0]
log_std = self.policy.log_std
adv_var = Variable(torch.from_numpy(advantages).float(), requires_grad=False).reshape(-1,1)
obs_var = Variable(torch.from_numpy(observations).float(), requires_grad=False)
obs_var = torch.cat([obs_var, torch.ones(obs_var.shape[0],1)], dim=1)
vpg_grad = vpg_grad.data.numpy()
# delete this
adv_var = adv_var - adv_var.max()
hess_tmp = torch.mm(torch.t(obs_var * adv_var), obs_var).data.numpy() / adv_var.numel()
log_std = log_std.data.numpy()
hess_tmp = np.kron(np.diag(1/np.exp(log_std)), hess_tmp)
vpg_hess = hess_tmp
vpg_hess = (vpg_hess + vpg_hess.T) / 2
return vpg_grad, vpg_hess
# ----------------------------------------------------------
def train_step(self, N,
sample_mode='trajectories',
env_name=None,
T=1e6,
gamma=0.995,
gae_lambda=0.98,
num_cpu='max',
task_id=0):
# Clean up input arguments
if env_name is None: env_name = self.env.env_id
if sample_mode != 'trajectories' and sample_mode != 'samples':
print("sample_mode in NPG must be either 'trajectories' or 'samples'")
quit()
ts = timer.time()
if sample_mode == 'trajectories':
policy_copy = self.copy_policy_for_detach()
paths = trajectory_sampler.sample_paths_parallel(N, policy_copy, T, env_name,
self.seed, num_cpu)
elif sample_mode == 'samples':
paths = batch_sampler.sample_paths(N, self.policy, T, env_name=env_name,
pegasus_seed=self.seed, num_cpu=num_cpu)
if self.save_logs:
self.logger[task_id].log_kv('time_sampling', timer.time() - ts)
self.seed = self.seed + N if self.seed is not None else self.seed
# compute returns
process_samples.compute_returns(paths, gamma)
# compute advantages
process_samples.compute_advantages(paths, self.baseline, gamma, gae_lambda)
# train from paths
eval_statistics = self.train_from_paths(paths, task_id)
eval_statistics.append(N)
# fit baseline
if self.save_logs:
ts = timer.time()
error_before, error_after = self.baseline.fit(paths, return_errors=True)
self.logger[task_id].log_kv('time_VF', timer.time()-ts)
self.logger[task_id].log_kv('VF_error_before', error_before)
self.logger[task_id].log_kv('VF_error_after', error_after)
else:
self.baseline.fit(paths)
return eval_statistics
# ----------------------------------------------------------
def train_from_paths(self, paths, task_id):
# Concatenate from all the trajectories
observations = np.concatenate([path["observations"] for path in paths])
actions = np.concatenate([path["actions"] for path in paths])
advantages = np.concatenate([path["advantages"] for path in paths])
# Advantage whitening
advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + 1e-6)
# cache return distributions for the paths
path_returns = [sum(p["rewards"]) for p in paths]
mean_return = np.mean(path_returns)
std_return = np.std(path_returns)
min_return = np.amin(path_returns)
max_return = np.amax(path_returns)
base_stats = [mean_return, std_return, min_return, max_return]
if task_id in self.running_score:
self.running_score[task_id] = 0.9*self.running_score[task_id] + 0.1*mean_return
else:
self.running_score[task_id] = mean_return
if self.save_logs: self.log_rollout_statistics(paths, task_id)
# Keep track of times for various computations
t_gLL = 0.0
# Optimization algorithm
# --------------------------
surr_before = self.CPI_surrogate(observations, actions, advantages).data.numpy().ravel()[0]
# VPG
ts = timer.time()
vpg_grad = self.flat_vpg(observations, actions, advantages)
t_gLL += timer.time() - ts
# Policy update
# --------------------------
curr_params = self.policy.get_param_values(task_id)
new_params = curr_params + self.theta * vpg_grad
self.policy.set_param_values(new_params, task_id, set_new=True, set_old=False)
surr_after = self.CPI_surrogate(observations, actions, advantages).data.numpy().ravel()[0]
kl_dist = self.kl_old_new(observations, actions).data.numpy().ravel()[0]
self.policy.set_param_values(new_params, task_id, set_new=True, set_old=True)
# Log information
if self.save_logs:
self.logger[task_id].log_kv('alpha_{}'.format(task_id), self.theta)
self.logger[task_id].log_kv('time_vpg_{}'.format(task_id), t_gLL)
self.logger[task_id].log_kv('kl_dist_{}'.format(task_id), kl_dist)
self.logger[task_id].log_kv('surr_improvement_{}'.format(task_id), surr_after - surr_before)
self.logger[task_id].log_kv('running_score_{}'.format(task_id), self.running_score[task_id])
return base_stats
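    # The update above is plain REINFORCE / vanilla policy gradient: parameters move along the
    # gradient of the CPI surrogate mean(LR * A) with step size self.theta. A minimal sketch of
    # the same step outside this class (hypothetical shapes, NumPy only):
    #   grad = np.zeros_like(params)           # stand-in for flat_vpg(observations, ...)
    #   params = params + learn_rate * grad    # gradient ascent on the surrogate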
def test_tasks(self, task_ids=None,
test_rollouts=10,
num_cpu=1,
update_s=False):
if task_ids is None:
task_ids = list(self.observed_tasks)
mean_pol_perf = {}
for task_id in task_ids:
self.set_task(task_id)
policy_copy = self.copy_policy_for_detach()
eval_paths = trajectory_sampler.sample_paths_parallel(N=test_rollouts, policy=policy_copy, num_cpu=num_cpu,
env_name=self.env.env_id, mode='evaluation', pegasus_seed=self.seed)
mean_pol_perf[task_id] = np.mean([np.sum(path['rewards']) for path in eval_paths])
self.seed = self.seed + test_rollouts if self.seed is not None else self.seed
return mean_pol_perf
def data_for_grad(self, N,
sample_mode='trajectories',
env_name=None,
T=1e6,
gamma=0.995,
gae_lambda=0.98,
num_cpu='max',
task_id=0,
returns=False):
# Clean up input arguments
if env_name is None: env_name = self.env.env_id
if sample_mode != 'trajectories' and sample_mode != 'samples':
print("sample_mode in NPG must be either 'trajectories' or 'samples'")
quit()
ts = timer.time()
if sample_mode == 'trajectories':
policy_copy = self.copy_policy_for_detach()
paths = trajectory_sampler.sample_paths_parallel(N, policy_copy, T, env_name,
self.seed, num_cpu)
elif sample_mode == 'samples':
paths = batch_sampler.sample_paths(N, self.policy, T, env_name=env_name,
pegasus_seed=self.seed, num_cpu=num_cpu)
if self.save_logs:
self.logger[task_id].log_kv('time_sampling_hess', timer.time() - ts)
self.seed = self.seed + N if self.seed is not None else self.seed
# compute returns
process_samples.compute_returns(paths, gamma)
# compute advantages
process_samples.compute_advantages(paths, self.baseline, gamma, gae_lambda)
# Concatenate from all the trajectories
observations = np.concatenate([path["observations"] for path in paths])
actions = np.concatenate([path["actions"] for path in paths])
advantages = np.concatenate([path["advantages"] for path in paths])
# Advantage whitening
advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + 1e-6)
# cache return distributions for the paths
path_returns = [sum(p["rewards"]) for p in paths]
mean_return = np.mean(path_returns)
std_return = np.std(path_returns)
        min_return = np.amin(path_returns)
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import treecorr
import os
import coord
import fitsio
from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer
from test_helper import is_ccw, is_ccw_3d
@timer
def test_log_binning():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
np.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
np.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
np.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
np.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
np.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
np.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
np.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
np.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
np.testing.assert_equal(nnn.v1d.shape, (2*nnn.nvbins,) )
np.testing.assert_almost_equal(nnn.v1d[0], -nnn.max_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins-1], -nnn.min_v - 0.5*nnn.vbin_size)
np.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
np.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == np.ceil(1./nnn.ubin_size)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == np.ceil(1./nnn.vbin_size)
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogRUV')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=0., max_v=0.2, nvbins=2)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 2
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.4, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert np.isclose(nnn.ubin_size, 0.05)
assert np.isclose(nnn.min_u, 0.75)
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert np.isclose(nnn.vbin_size, 0.05)
assert np.isclose(nnn.min_v, 0.2)
assert nnn.max_v == 0.4
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert np.isclose(nnn.ubin_size, 0.05)
assert nnn.nubins == 4
assert nnn.min_v == 0.2
assert nnn.max_v == 0.4
assert np.isclose(nnn.vbin_size, 0.05)
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=0.1, max_v=0.3, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.bin_size <= 0.1
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 24
assert np.isclose(nnn.ubin_size, 0.7/24)
assert nnn.min_v == 0.1
assert nnn.max_v == 0.3
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
    # If only one of min/max is given for u and v, respect that
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, ubin_size=0.03,
min_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.2
assert nnn.max_u == 1.
assert nnn.nubins == 27
assert np.isclose(nnn.ubin_size, 0.8/27)
assert nnn.min_v == 0.2
assert nnn.max_v == 1.
assert nnn.nvbins == 12
assert np.isclose(nnn.vbin_size, 0.8/12)
check_arrays(nnn)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
max_u=0.2, ubin_size=0.03,
max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.
assert nnn.max_u == 0.2
assert nnn.nubins == 7
assert np.isclose(nnn.ubin_size, 0.2/7)
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only vbin_size is set for v, automatically figure out others.
# (And if necessary adjust the bin_size down a bit.)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.3, vbin_size=0.3)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 4
assert np.isclose(nnn.ubin_size, 0.25)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 4
assert np.isclose(nnn.vbin_size, 0.25)
check_arrays(nnn)
# If only nvbins is set for v, automatically figure out others.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
nubins=5, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 5
assert np.isclose(nnn.ubin_size,0.2)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 5
assert np.isclose(nnn.vbin_size,0.2)
check_arrays(nnn)
# If both nvbins and vbin_size are set, set min/max automatically
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.1, nubins=5,
vbin_size=0.1, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.ubin_size == 0.1
assert nnn.nubins == 5
assert nnn.max_u == 1.
assert np.isclose(nnn.min_u,0.5)
assert nnn.vbin_size == 0.1
assert nnn.nvbins == 5
assert nnn.min_v == 0.
assert np.isclose(nnn.max_v,0.5)
check_arrays(nnn)
assert_raises(TypeError, treecorr.NNNCorrelation)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, bin_size=0.1)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Log')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Linear')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='TwoD')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Invalid')
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.9, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=-0.1, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.1, max_u=1.3)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.9, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=-0.1, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=1.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
split_method='invalid')
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5.)
np.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.02)
np.testing.assert_almost_equal(nnn.bu, 0.006)
np.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.0)
np.testing.assert_almost_equal(nnn.bu, 0.0)
np.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.2)
np.testing.assert_almost_equal(nnn.bu, 0.06)
np.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.4)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
np.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05,
min_u=0., max_u=0.9, ubin_size=0.3,
min_v=0., max_v=0.17, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert np.isclose(nnn.ubin_size, 0.3)
assert np.isclose(nnn.vbin_size, 0.17)
np.testing.assert_almost_equal(nnn.b, 0.05)
np.testing.assert_almost_equal(nnn.bu, 0.1)
np.testing.assert_almost_equal(nnn.bv, 0.1)
np.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
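# A quick numeric illustration of the logarithmic binning convention exercised above
# (illustrative numbers only): with min_sep=5, max_sep=20 and nbins=20,
#   bin_size = log(20/5) / 20 = log(4)/20 ~= 0.0693
# and the nominal bin centers in log(sep) run from log(5)+bin_size/2 up to log(20)-bin_size/2.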
@timer
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
    # to see if it comes out right.  This should exactly match the treecorr code if bin_slop=0.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
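    # The brute-force loop below uses TreeCorr's LogRUV triangle parametrization: with the
    # side lengths ordered d1 >= d2 >= d3, it bins r = d2, u = d3/d2 and v = +/-(d1-d2)/d3,
    # the sign coming from the orientation (ccw) of the three vertices. For example, sides
    # (5, 4, 2) give r = 4, u = 0.5, v = 0.5 before the sign is applied.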
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
nz = np.where((ddd.ntri > 0) | (true_ntri > 0))
print('non-zero at:')
print(nz)
print('d1 = ',ddd.meand1[nz])
print('d2 = ',ddd.meand2[nz])
print('d3 = ',ddd.meand3[nz])
print('rnom = ',ddd.rnom[nz])
print('u = ',ddd.u[nz])
print('v = ',ddd.v[nz])
print('ddd.ntri = ',ddd.ntri[nz])
print('true_ntri = ',true_ntri[nz])
print('diff = ',ddd.ntri[nz] - true_ntri[nz])
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Check that running via the corr3 script works correctly.
file_name = os.path.join('data','nnn_direct_data.dat')
with open(file_name, 'w') as fid:
for i in range(ngal):
fid.write(('%.20f %.20f\n')%(x[i],y[i]))
L = 10*s
nrand = ngal
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rcat = treecorr.Catalog(x=rx, y=ry)
rand_file_name = os.path.join('data','nnn_direct_rand.dat')
with open(rand_file_name, 'w') as fid:
for i in range(nrand):
fid.write(('%.20f %.20f\n')%(rx[i],ry[i]))
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0, rng=rng)
rrr.process(rcat)
zeta, varzeta = ddd.calculateZeta(rrr)
# Semi-gratuitous check of BinnedCorr3.rng access.
assert rrr.rng is rng
assert ddd.rng is not rng
# First do this via the corr3 function.
config = treecorr.config.read_config('configs/nnn_direct.yaml')
logger = treecorr.config.setup_logger(0)
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
print('corr3_output = ',corr3_output)
print('corr3_output.dtype = ',corr3_output.dtype)
print('rnom = ',ddd.rnom.flatten())
print(' ',corr3_output['r_nom'])
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
print('unom = ',ddd.u.flatten())
print(' ',corr3_output['u_nom'])
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
print('vnom = ',ddd.v.flatten())
print(' ',corr3_output['v_nom'])
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
print('DDD = ',ddd.ntri.flatten())
print(' ',corr3_output['DDD'])
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('RRR = ',rrr.ntri.flatten())
print(' ',corr3_output['RRR'])
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten(), rtol=1.e-3)
print('zeta = ',zeta.flatten())
print('from corr3 output = ',corr3_output['zeta'])
print('diff = ',corr3_output['zeta']-zeta.flatten())
diff_index = np.where(np.abs(corr3_output['zeta']-zeta.flatten()) > 1.e-5)[0]
print('different at ',diff_index)
print('zeta[diffs] = ',zeta.flatten()[diff_index])
print('corr3.zeta[diffs] = ',corr3_output['zeta'][diff_index])
print('diff[diffs] = ',zeta.flatten()[diff_index] - corr3_output['zeta'][diff_index])
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
# Now calling out to the external corr3 executable.
# This is the only time we test the corr3 executable. All other tests use corr3 function.
import subprocess
corr3_exe = get_script_name('corr3')
p = subprocess.Popen( [corr3_exe,"configs/nnn_direct.yaml","verbose=0"] )
p.communicate()
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Also check compensated
drr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
rdd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
drr.process(cat, rcat)
rdd.process(rcat, cat)
zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd)
config['nnn_statistic'] = 'compensated'
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1)
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('rrr.tot = ',rrr.tot)
print('ddd.tot = ',ddd.tot)
print('drr.tot = ',drr.tot)
print('rdd.tot = ',rdd.tot)
rrrf = ddd.tot / rrr.tot
drrf = ddd.tot / drr.tot
rddf = ddd.tot / rdd.tot
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten() * rrrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DRR'], drr.ntri.flatten() * drrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['RDD'], rdd.ntri.flatten() * rddf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
    # Repeat with bin_slop=0, since the code flow is different from brute=True
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
# Here, we get 6x as much, since each triangle is discovered 6 times.
ddd.clear()
ddd.process(cat,cat,cat, num_threads=2)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, 6*true_ntri)
# With the real CrossCorrelation class, each of the 6 correlations should end up being
# the same thing (without the extra factor of 6).
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
dddc.process(cat,cat,cat, num_threads=2)
# All 6 correlations are equal.
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(d.ntri, true_ntri)
# Or with 2 argument version, finds each triangle 3 times.
ddd.process(cat,cat, num_threads=2)
np.testing.assert_array_equal(ddd.ntri, 3*true_ntri)
# Again, NNNCrossCorrelation gets it right in each permutation.
dddc.process(cat,cat, num_threads=2)
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
np.testing.assert_array_equal(d.ntri, true_ntri)
# Invalid to omit file_name
config['verbose'] = 0
del config['file_name']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name'] = 'data/nnn_direct_data.dat'
# OK to not have rand_file_name
# Also, check the automatic setting of output_dots=True when verbose=2.
# It's not too annoying if we also set max_top = 0.
del config['rand_file_name']
config['verbose'] = 2
config['max_top'] = 0
treecorr.corr3(config)
data = np.genfromtxt(config['nnn_file_name'], names=True, skip_header=1)
np.testing.assert_array_equal(data['ntri'], true_ntri.flatten())
assert 'zeta' not in data.dtype.names
# Check a few basic operations with a NNNCorrelation object.
do_pickle(ddd)
ddd2 = ddd.copy()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, 2*ddd.ntri)
np.testing.assert_allclose(ddd2.weight, 2*ddd.weight)
np.testing.assert_allclose(ddd2.meand1, 2*ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, 2*ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, 2*ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, 2*ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, 2*ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, 2*ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, 2*ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, 2*ddd.meanv)
ddd2.clear()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, ddd.ntri)
np.testing.assert_allclose(ddd2.weight, ddd.weight)
np.testing.assert_allclose(ddd2.meand1, ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, ddd.meanv)
ascii_name = 'output/nnn_ascii.txt'
ddd.write(ascii_name, precision=16)
ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd3.read(ascii_name)
np.testing.assert_allclose(ddd3.ntri, ddd.ntri)
np.testing.assert_allclose(ddd3.weight, ddd.weight)
np.testing.assert_allclose(ddd3.meand1, ddd.meand1)
np.testing.assert_allclose(ddd3.meand2, ddd.meand2)
np.testing.assert_allclose(ddd3.meand3, ddd.meand3)
np.testing.assert_allclose(ddd3.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd3.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd3.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd3.meanu, ddd.meanu)
np.testing.assert_allclose(ddd3.meanv, ddd.meanv)
with assert_raises(TypeError):
ddd2 += config
ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd4
ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd5
ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd6
ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u-0.1, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd7
ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u+0.1, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd8
ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins*2,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd9
ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v-0.1, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd10
ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v+0.1, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd11
ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins*2)
with assert_raises(ValueError):
ddd2 += ddd12
# Check that adding results with different coords or metric emits a warning.
cat2 = treecorr.Catalog(x=x, y=y, z=x)
with CaptureLog() as cl:
ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd13.process_auto(cat2)
ddd13 += ddd2
print(cl.output)
assert "Detected a change in catalog coordinate systems" in cl.output
with CaptureLog() as cl:
ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd14.process_auto(cat2, metric='Arc')
ddd14 += ddd2
assert "Detected a change in metric" in cl.output
fits_name = 'output/nnn_fits.fits'
ddd.write(fits_name)
ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd15.read(fits_name)
np.testing.assert_allclose(ddd15.ntri, ddd.ntri)
np.testing.assert_allclose(ddd15.weight, ddd.weight)
np.testing.assert_allclose(ddd15.meand1, ddd.meand1)
np.testing.assert_allclose(ddd15.meand2, ddd.meand2)
np.testing.assert_allclose(ddd15.meand3, ddd.meand3)
np.testing.assert_allclose(ddd15.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd15.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd15.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd15.meanu, ddd.meanu)
np.testing.assert_allclose(ddd15.meanv, ddd.meanv)
@timer
def test_direct_count_cross():
# If the catalogs are small enough, we can do a direct count of the number of triangles
    # to see if it comes out right.  This should exactly match the treecorr code if brute=True.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3 = treecorr.Catalog(x=x3, y=y3)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_321
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
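    # Worked example of the binning above (illustrative only, not from the original test):
    # a triangle with d1=12, d2=10, d3=5 gives r=10, u=0.5, v=0.4. With min_sep=1, max_sep=50,
    # nbins=50 the log bin size is ln(50)/50 ~= 0.078, so kr = floor(ln(10)/0.078) = 29.
    # With ubin_size=(0.89-0.13)/10=0.076, ku = floor(0.37/0.076) = 4, and for a ccw triangle
    # kv = floor((0.4-0.13)/0.046) + nvbins = 5 + 10 = 15; the cw (mirror-image) triangle instead
    # lands at kv = floor((-0.4+0.59)/0.046) = 4 in the lower half of the v axis.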
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
dddc.process(cat1, cat2, cat3)
#print('true_ntri_123 = ',true_ntri_123)
#print('diff = ',dddc.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_321)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
    #print('bin_slop = 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Error to have cat3, but not cat2
with assert_raises(ValueError):
ddd.process(cat1, cat3=cat3)
    # Check a few basic operations with an NNNCrossCorrelation object.
do_pickle(dddc)
dddc2 = dddc.copy()
dddc2 += dddc
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc2, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, 2*d1.ntri)
np.testing.assert_allclose(d2.meand1, 2*d1.meand1)
np.testing.assert_allclose(d2.meand2, 2*d1.meand2)
np.testing.assert_allclose(d2.meand3, 2*d1.meand3)
        np.testing.assert_allclose(d2.meanlogd1, 2*d1.meanlogd1)
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import os
import tempfile
try:
import cffi
except ImportError:
cffi = None
import cupy
from cupy import testing
import numpy
import pytest
import cuquantum
from cuquantum import ComputeType, cudaDataType
from cuquantum import custatevec as cusv
###################################################################
#
# As of beta 2, the test suite for Python bindings is kept minimal.
# The sole goal is to ensure the Python arguments are properly
# passed to the C level. We do not ensure coverage or correctness.
# This decision will be revisited in the future.
#
###################################################################
dtype_to_data_type = {
numpy.dtype(numpy.complex64): cudaDataType.CUDA_C_32F,
numpy.dtype(numpy.complex128): cudaDataType.CUDA_C_64F,
}
dtype_to_compute_type = {
numpy.dtype(numpy.complex64): ComputeType.COMPUTE_32F,
numpy.dtype(numpy.complex128): ComputeType.COMPUTE_64F,
}
@pytest.fixture()
def handle():
h = cusv.create()
yield h
cusv.destroy(h)
@testing.parameterize(*testing.product({
'n_qubits': (3,),
'dtype': (numpy.complex64, numpy.complex128),
}))
class TestSV:
# Base class for all statevector tests
def get_sv(self):
arr = cupy.zeros((2**self.n_qubits,), dtype=self.dtype)
arr[0] = 1 # initialize in |000...00>
return arr
# TODO: make this a static method
def _return_data(self, data, name, dtype, return_value):
if return_value == 'int':
if len(data) == 0:
# empty, give it a NULL
return 0, 0
else:
# return int as void*
data = numpy.asarray(data, dtype=dtype)
setattr(self, name, data) # keep data alive
return data.ctypes.data, data.size
elif return_value == 'seq':
# data itself is already a flat sequence
return data, len(data)
else:
assert False
@pytest.fixture()
def multi_gpu_handles():
# TODO: consider making this class more flexible
# (ex: arbitrary number of qubits and/or devices, etc)
n_devices = 2 # should be power of 2
handles = []
for dev in range(n_devices):
with cupy.cuda.Device(dev):
h = cusv.create()
handles.append(h)
yield handles
for dev in range(n_devices):
with cupy.cuda.Device(dev):
h = handles.pop(0)
cusv.destroy(h)
def get_exponent(n):
assert (n % 2) == 0
exponent = 1
while True:
out = n >> exponent
if out != 1:
exponent += 1
else:
break
return exponent
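# Illustrative check (added annotation, not part of the original tests): get_exponent() returns
# log2(n) for powers of two, e.g. get_exponent(2) == 1, get_exponent(4) == 2, get_exponent(16) == 4.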
@testing.parameterize(*testing.product({
'n_qubits': (4,),
'dtype': (numpy.complex64, numpy.complex128),
}))
class TestMultiGpuSV:
# TODO: consider making this class more flexible
# (ex: arbitrary number of qubits and/or devices, etc)
n_devices = 2 # should be power of 2
def get_sv(self):
self.n_global_bits = get_exponent(self.n_devices)
self.n_local_bits = self.n_qubits - self.n_global_bits
self.sub_sv = []
for dev in range(self.n_devices):
with cupy.cuda.Device(dev):
self.sub_sv.append(cupy.zeros(
2**self.n_local_bits, dtype=self.dtype))
self.sub_sv[0][0] = 1 # initialize in |000...00>
return self.sub_sv
# TODO: make this a static method
def _return_data(self, data, name, dtype, return_value):
if return_value == 'int':
if len(data) == 0:
# empty, give it a NULL
return 0, 0
else:
# return int as void*
data = numpy.asarray(data, dtype=dtype)
setattr(self, name, data) # keep data alive
return data.ctypes.data, data.size
elif return_value == 'seq':
# data itself is already a flat sequence
return data, len(data)
else:
assert False
class TestLibHelper:
def test_get_version(self):
ver = cusv.get_version()
assert ver == (cusv.MAJOR_VER * 1000
+ cusv.MINOR_VER * 100
+ cusv.PATCH_VER)
assert ver == cusv.VERSION
def test_get_property(self):
assert cusv.MAJOR_VER == cusv.get_property(
cuquantum.libraryPropertyType.MAJOR_VERSION)
assert cusv.MINOR_VER == cusv.get_property(
cuquantum.libraryPropertyType.MINOR_VERSION)
assert cusv.PATCH_VER == cusv.get_property(
cuquantum.libraryPropertyType.PATCH_LEVEL)
# we don't wanna recompile for every test case...
_cffi_mod1 = None
_cffi_mod2 = None
def _can_use_cffi():
if cffi is None or os.environ.get('CUDA_PATH') is None:
return False
else:
return True
class MemoryResourceFactory:
def __init__(self, source, name=None):
self.source = source
self.name = source if name is None else name
def get_dev_mem_handler(self):
if self.source == "py-callable":
return (*self._get_cuda_callable(), self.name)
elif self.source == "cffi":
# ctx is not needed, so set to NULL
return (0, *self._get_functor_address(), self.name)
elif self.source == "cffi_struct":
return self._get_handler_address()
# TODO: add more different memory sources
else:
raise NotImplementedError
def _get_cuda_callable(self):
def alloc(size, stream):
return cupy.cuda.runtime.mallocAsync(size, stream)
def free(ptr, size, stream):
cupy.cuda.runtime.freeAsync(ptr, stream)
return alloc, free
def _get_functor_address(self):
if not _can_use_cffi():
raise RuntimeError
global _cffi_mod1
if _cffi_mod1 is None:
import importlib
mod_name = f"cusv_test_{self.source}"
ffi = cffi.FFI()
ffi.set_source(mod_name, """
#include <cuda_runtime.h>
// cffi limitation: we can't use the actual type cudaStream_t because
// it's considered an "incomplete" type and we can't get the functor
// address by doing so...
int my_alloc(void* ctx, void** ptr, size_t size, void* stream) {
return (int)cudaMallocAsync(ptr, size, stream);
}
int my_free(void* ctx, void* ptr, size_t size, void* stream) {
return (int)cudaFreeAsync(ptr, stream);
}
""",
include_dirs=[os.environ['CUDA_PATH']+'/include'],
library_dirs=[os.environ['CUDA_PATH']+'/lib64'],
libraries=['cudart'],
)
ffi.cdef("""
int my_alloc(void* ctx, void** ptr, size_t size, void* stream);
int my_free(void* ctx, void* ptr, size_t size, void* stream);
""")
ffi.compile(verbose=True)
self.ffi = ffi
_cffi_mod1 = importlib.import_module(mod_name)
self.ffi_mod = _cffi_mod1
alloc_addr = self._get_address("my_alloc")
free_addr = self._get_address("my_free")
return alloc_addr, free_addr
def _get_handler_address(self):
if not _can_use_cffi():
raise RuntimeError
global _cffi_mod2
if _cffi_mod2 is None:
import importlib
mod_name = f"cusv_test_{self.source}"
ffi = cffi.FFI()
ffi.set_source(mod_name, """
#include <cuda_runtime.h>
// cffi limitation: we can't use the actual type cudaStream_t because
// it's considered an "incomplete" type and we can't get the functor
// address by doing so...
int my_alloc(void* ctx, void** ptr, size_t size, void* stream) {
return (int)cudaMallocAsync(ptr, size, stream);
}
int my_free(void* ctx, void* ptr, size_t size, void* stream) {
return (int)cudaFreeAsync(ptr, stream);
}
typedef struct {
void* ctx;
int (*device_alloc)(void* ctx, void** ptr, size_t size, void* stream);
int (*device_free)(void* ctx, void* ptr, size_t size, void* stream);
char name[64];
} myHandler;
myHandler* init_myHandler(myHandler* h, const char* name) {
h->ctx = NULL;
h->device_alloc = my_alloc;
h->device_free = my_free;
memcpy(h->name, name, 64);
return h;
}
""",
include_dirs=[os.environ['CUDA_PATH']+'/include'],
library_dirs=[os.environ['CUDA_PATH']+'/lib64'],
libraries=['cudart'],
)
ffi.cdef("""
typedef struct {
...;
} myHandler;
myHandler* init_myHandler(myHandler* h, const char* name);
""")
ffi.compile(verbose=True)
self.ffi = ffi
_cffi_mod2 = importlib.import_module(mod_name)
self.ffi_mod = _cffi_mod2
h = self.handler = self.ffi_mod.ffi.new("myHandler*")
self.ffi_mod.lib.init_myHandler(h, self.name.encode())
return self._get_address(h)
def _get_address(self, func_name_or_ptr):
if isinstance(func_name_or_ptr, str):
func_name = func_name_or_ptr
data = str(self.ffi_mod.ffi.addressof(self.ffi_mod.lib, func_name))
else:
ptr = func_name_or_ptr # ptr to struct
data = str(self.ffi_mod.ffi.addressof(ptr[0]))
# data has this format: "<cdata 'int(*)(void *, void * *, size_t, void *)' 0x7f6c5da37300>"
return int(data.split()[-1][:-1], base=16)
class TestHandle:
def test_handle_create_destroy(self, handle):
        # simple round-trip test
pass
def test_workspace(self, handle):
default_workspace_size = cusv.get_default_workspace_size(handle)
# this is about 18MB as of cuQuantum beta 1
assert default_workspace_size > 0
# cuStateVec does not like a smaller workspace...
size = 24*1024**2
assert size > default_workspace_size
memptr = cupy.cuda.alloc(size)
cusv.set_workspace(handle, memptr.ptr, size) # should not fail
def test_stream(self, handle):
# default is on the null stream
assert 0 == cusv.get_stream(handle)
# simple set/get round-trip
stream = cupy.cuda.Stream()
cusv.set_stream(handle, stream.ptr)
assert stream.ptr == cusv.get_stream(handle)
class TestAbs2Sum(TestSV):
@pytest.mark.parametrize(
'input_form', (
{'basis_bits': (numpy.int32, 'int'),},
{'basis_bits': (numpy.int32, 'seq'),},
)
)
def test_abs2sum_on_z_basis(self, handle, input_form):
sv = self.get_sv()
basis_bits = list(range(self.n_qubits))
basis_bits, basis_bits_len = self._return_data(
basis_bits, 'basis_bits', *input_form['basis_bits'])
data_type = dtype_to_data_type[sv.dtype]
# case 1: both are computed
sum0, sum1 = cusv.abs2sum_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
True, True, basis_bits, basis_bits_len)
        assert numpy.allclose(sum0+sum1, 1)
import numpy as np
class Matrices:
_NAMES = ["I", "H", "X", "Y", "Z", "CNOT", "CZ", "SWAP", "TOFFOLI"]
def __init__(self, backend):
self.backend = backend
self._I = None
self._H = None
self._X = None
self._Y = None
self._Z = None
self._CNOT = None
self._CZ = None
self._SWAP = None
self._TOFFOLI = None
self.allocate_matrices()
def allocate_matrices(self):
for name in self._NAMES:
getattr(self, f"_set{name}")()
@property
def dtype(self):
return self.backend._dtypes.get('DTYPECPX')
@property
def I(self):
return self._I
@property
def H(self):
return self._H
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
@property
def Z(self):
return self._Z
@property
def CNOT(self):
return self._CNOT
@property
def CZ(self):
return self._CZ
@property
def SWAP(self):
return self._SWAP
@property
def TOFFOLI(self):
return self._TOFFOLI
def _setI(self):
self._I = self.backend.cast(np.eye(2, dtype=self.dtype))
def _setH(self):
        m = np.ones((2, 2), dtype=self.dtype)
import numpy as np
import starry
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from tqdm import tqdm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import pytest
@pytest.mark.parametrize(
"xs,ys,zs,ro",
[
[1.0, 2.0, -1.0, 0.6],
[1.0, 2.0, -1.0, 5.0],
[1.0, 2.0, -1.0, 5.0],
[1.0, 2.0, -1.0, 50.0],
[0.0, 2.0, -1.0, 0.4],
[0.0, -1.0, -1.0, 0.4],
[0.0, -1.0, -1.0, 0.4],
[1.0, 0.0, -1.0, 0.4],
[1.0, 0.0, -1.0, 0.1],
[1.0, 0.0, -1.0, 0.8],
[1.0, 0.0, 0.0, 0.8],
],
)
def test_edges(
xs, ys, zs, ro, y=[1, 1, 1], ns=100, nb=50, res=999, atol=1e-2, plot=False
):
# Instantiate
ydeg = np.sqrt(len(y) + 1) - 1
map = starry.Map(ydeg=ydeg, reflected=True)
map[1:, :] = y
# bo - ro singularities
singularities = [ro - 1, 0, ro, 1, 1 - ro, 1 + ro]
labels = [
"$b_o = r_o - 1$",
"$b_o = 0$",
"$b_o = r_o$",
"$b_o = 1$",
"$b_o = 1 - r_o$",
"$b_o = 1 + r_o$",
"grazing",
"grazing",
]
# Find where the occultor grazes the terminator
rs = np.sqrt(xs ** 2 + ys ** 2 + zs ** 2)
b = -zs / rs
theta = -np.arctan2(xs, ys)
tol = 1e-15
nx = 10
    c = np.cos(theta)
'''
Question 2 Skeleton Code
Here you should implement and evaluate the Conditional Gaussian classifier.
'''
import data
import numpy as np
# Import pyplot - plt.imshow is useful!
import matplotlib.pyplot as plt
from scipy.special import logsumexp
def compute_mean_mles(train_data, train_labels):
'''
Compute the mean estimate for each digit class
Should return a numpy array of size (10,64)
The ith row will correspond to the mean estimate for digit class i
'''
means = np.zeros((10, 64))
# Compute means
for k in range(10):
X = data.get_digits_by_label(train_data, train_labels, k)
means[k] = np.sum(X, axis=0) / X.shape[0]
return means
def compute_sigma_mles(train_data, train_labels):
'''
Compute the covariance estimate for each digit class
Should return a three dimensional numpy array of shape (10, 64, 64)
consisting of a covariance matrix for each digit class
'''
covariances = np.zeros((10, 64, 64))
# Compute covariances
means = compute_mean_mles(train_data, train_labels)
for k in range(10):
X = data.get_digits_by_label(train_data, train_labels, k)
covariances[k] = ((X - means[k]).T @ (X - means[k])) / X.shape[0] + 0.01 * np.identity(64)
return covariances
def generative_likelihood(digits, means, covariances):
'''
Compute the generative log-likelihood:
log p(x|y,mu,Sigma)
Should return an n x 10 numpy array
'''
res = np.zeros((digits.shape[0], 10))
for k in range(10):
temp = ((2 * np.pi) ** (-digits.shape[1] / 2)) * (np.linalg.det(covariances[k]) ** (-1/2)) * \
np.exp(-0.5 * np.diag((digits - means[k]) @ np.linalg.inv(covariances[k]) @ (digits - means[k]).T))
res[:, k] = np.log(temp)
return res
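# For reference (added annotation, not part of the original skeleton): the quantity computed
# above is the multivariate Gaussian log-density
#   log p(x | y=k) = -d/2 * log(2*pi) - 1/2 * log|Sigma_k| - 1/2 * (x - mu_k)^T Sigma_k^{-1} (x - mu_k)
# with d = 64; the code evaluates the density first and then takes its log.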
def conditional_likelihood(digits, means, covariances):
'''
Compute the conditional likelihood:
log p(y|x, mu, Sigma)
This should be a numpy array of shape (n, 10)
Where n is the number of datapoints and 10 corresponds to each digit class
'''
numerator = generative_likelihood(digits, means, covariances) + np.log(0.1)
denominator = logsumexp(numerator, axis=1).reshape(-1, 1)
return numerator - denominator
def avg_conditional_likelihood(digits, labels, means, covariances):
'''
Compute the average conditional likelihood over the true class labels
AVG( log p(y_i|x_i, mu, Sigma) )
i.e. the average log likelihood that the model assigns to the correct class label
'''
cond_likelihood = conditional_likelihood(digits, means, covariances)
counter = 0
for i in range(digits.shape[0]):
counter += cond_likelihood[i][int(labels[i])]
# Compute as described above and return
return counter / digits.shape[0]
def classify_data(digits, means, covariances):
'''
Classify new points by taking the most likely posterior class
'''
cond_likelihood = conditional_likelihood(digits, means, covariances)
# Compute and return the most likely class
return np.argmax(cond_likelihood, axis=1)
def main():
train_data, train_labels, test_data, test_labels = data.load_all_data('data')
# Fit the model
means = compute_mean_mles(train_data, train_labels)
covariances = compute_sigma_mles(train_data, train_labels)
# Evaluation
# Question 1, report avg cond loglikelihood
print('avg cond loglikelihood on training set = {}.'.format(avg_conditional_likelihood(train_data, train_labels, means, covariances)))
print('avg cond loglikelihood on test set = {}.'.format(avg_conditional_likelihood(test_data, test_labels, means, covariances)))
# Question 2, predict
train_pred = classify_data(train_data, means, covariances)
train_accuracy = np.sum(np.equal(train_pred, train_labels)) / train_labels.shape[0]
print('Training accuracy = {}.'.format(train_accuracy))
test_pred = classify_data(test_data, means, covariances)
    test_accuracy = np.sum(np.equal(test_pred, test_labels)) / test_labels.shape[0]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from collections import namedtuple
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import dltk.core.modules as modules
from dualstream_fcn_v2 import DualStreamFCN_v2
from dltk.core import metrics as metrics
#from dltk.core.io.sliding_window import SlidingWindow
from utils_dualstream import sliding_window_segmentation_inference
import SimpleITK as sitk
import reader as reader
# Training parameters
training_params = namedtuple('training_params',
'max_steps, batch_size, save_summary_sec, save_model_sec, steps_eval')
training_params.__new__.__defaults__ = (2e5, 16, 10, 600, 100) #TODO: 16 BS for sorbus, 40 for monal04 1e6
tps = training_params()
num_classes = 5
num_channels = 1
label_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 19, 20, 21, 28]
def resize_sitk(inp, reference):
resampleSliceFilter = sitk.ResampleImageFilter()
resampled = resampleSliceFilter.Execute(inp, reference.GetSize(), sitk.Transform(), sitk.sitkNearestNeighbor,
inp.GetOrigin(), reference.GetSpacing(), inp.GetDirection(),
0, inp.GetPixelIDValue())
return resampled
def validate(ops, session, supervisor, name, v_all=True):
"""
Run an inference on a validation dataset
Parameters
----------
ops : dict
a dictionary containing all validation ops
session : tf.session object
supervisor : tf.Supervisor object
Returns
-------
"""
# Pick num_validation_examples datasets to validate on
if v_all:
num_validation_examples = len(ops['filenames'])
else:
num_validation_examples = 4
val_idx = range(num_validation_examples)
# Track loss and Dice similarity coefficients as validation metrics
val_loss = []
val_dscs = []
# Iterate through the datasets and perform a sliding window inference
for f in ops['filenames'][val_idx]:
# Read a validation image and label of arbitrary dimensions
val_x, val_y = ops['read_func']([f])
pid = 'Subj.' + f[0].split('p/')[1][:2]
y_prob = sliding_window_segmentation_inference(session, [ops['y_prob']], {ops['x']: val_x}, {ops['streamid']: 0}, batch_size=1)[0]
        y_ = np.argmax(y_prob, axis=-1)
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import csv, os
import math
from PIL import Image
from keras.models import load_model
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer, Input
from tensorflow.python.keras.layers import Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten, Dropout
from keras.utils.np_utils import to_categorical
from keras.callbacks import Callback
from tensorflow.python.keras.optimizers import Adam
img_size = 512
img_size_flat = img_size * img_size * 3
# Tuple with height, width and depth used to reshape arrays.
# This is used for reshaping in Keras.
img_shape_full = (img_size, img_size, 3)
class_name = "neck_design_labels"
model_file = "Xception0.9/neck_design_labels_model.h5"
weight_file = "Xception0.9/weights.hdf5"
model = load_model(os.path.join("models", class_name, model_file))
model.load_weights(os.path.join("models", class_name, weight_file))
tests = []
rows = []
index = 0
with open('rank/Tests/question.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[1] != class_name:
continue
image = Image.open("rank/" + row[0])
img_array = np.asarray(image)
if img_array.shape != img_shape_full:
image = image.resize((img_size, img_size), Image.ANTIALIAS)
            img_array = np.asarray(image)
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
TEST equimap
'''
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
#import warnings
if __name__ == '__main__':
#print('path 1 =', sys.path)
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
#print('path 2 =', sys.path)
# Local modules
import imas
import equimap
#import imas_west
#import pywed as pw
shot = 53221
tol_val = 1E-10
# For 2D plots
interp_points = 60
# FIRST POINT B_NORM
# ------------------
time_in = np.linspace(36, 37, 10)
"""
Functions that operate on lists of spaCy documents.
"""
import operator
import string
from collections import Counter, OrderedDict
from functools import partial
import numpy as np
import spacy
from spacy.tokens import Doc
from spacy.vocab import Vocab
from ._common import DEFAULT_LANGUAGE_MODELS, load_stopwords, simplified_pos
from ._tokenfuncs import (
require_tokendocs, token_match, token_match_subsequent, token_glue_subsequent, make_index_window_around_matches,
expand_compound_token
)
from ..utils import require_listlike, require_types, flatten_list, empty_chararray, widen_chararray
from ..bow.dtm import create_sparse_dtm
from .._pd_dt_compat import pd_dt_frame, pd_dt_concat, pd_dt_sort
#%% global spaCy nlp instance
#: Global spaCy nlp instance which must be initiated via :func:`tmtoolkit.preprocess.init_for_language` when using
#: the functional preprocess API
nlp = None
#%% initialization and tokenization
def init_for_language(language=None, language_model=None, **spacy_opts):
"""
Initialize the functional API for a given language code `language` or a spaCy language model `language_model`.
The spaCy nlp instance will be returned and will also be used by default in all subsequent preprocess API calls.
:param language: two-letter ISO 639-1 language code (lowercase)
:param language_model: spaCy language model `language_model`
:param spacy_opts: additional keyword arguments passed to ``spacy.load()``
:return: spaCy nlp instance
"""
if language is None and language_model is None:
raise ValueError('either `language` or `language_model` must be given')
if language_model is None:
if not isinstance(language, str) or len(language) != 2:
raise ValueError('`language` must be a two-letter ISO 639-1 language code')
if language not in DEFAULT_LANGUAGE_MODELS:
raise ValueError('language "%s" is not supported' % language)
language_model = DEFAULT_LANGUAGE_MODELS[language] + '_sm'
spacy_kwargs = dict(disable=['parser', 'ner'])
spacy_kwargs.update(spacy_opts)
global nlp
nlp = spacy.load(language_model, **spacy_kwargs)
return nlp
def tokenize(docs, as_spacy_docs=True, doc_labels=None, doc_labels_fmt='doc-{i1}', enable_vectors=False,
nlp_instance=None):
"""
Tokenize a list or dict of documents `docs`, where each element contains the raw text of the document as string.
Requires that :func:`~tmtoolkit.preprocess.init_for_language` is called before or `nlp_instance` is passed.
:param docs: list or dict of documents with raw text strings; if dict, use dict keys as document labels
:param as_spacy_docs: if True, return list of spaCy ``Doc`` objects, otherwise return list of string tokens
:param doc_labels: if not None and `docs` is a list, use strings in this list as document labels
:param doc_labels_fmt: if `docs` is a list and `doc_labels` is None, generate document labels according to this
format, where ``{i0}`` or ``{i1}`` are replaced by the respective zero- or one-indexed
document numbers
:param enable_vectors: if True, generate word vectors (aka word embeddings) during tokenization;
this will be more computationally expensive
:param nlp_instance: spaCy nlp instance
:return: list of spaCy ``Doc`` documents if `as_spacy_docs` is True (default) or list of string token documents
"""
dictlike = hasattr(docs, 'keys') and hasattr(docs, 'values')
if not isinstance(docs, (list, tuple)) and not dictlike:
raise ValueError('`docs` must be a list, tuple or dict-like object')
if not isinstance(doc_labels_fmt, str):
raise ValueError('`doc_labels_fmt` must be a string')
_nlp = _current_nlp(nlp_instance)
if doc_labels is None:
if dictlike:
doc_labels = docs.keys()
docs = docs.values()
elif as_spacy_docs:
doc_labels = [doc_labels_fmt.format(i0=i, i1=i+1) for i in range(len(docs))]
elif len(doc_labels) != len(docs):
raise ValueError('`doc_labels` must have same length as `docs`')
if enable_vectors:
tokenized_docs = [_nlp(d) for d in docs]
else:
tokenized_docs = [_nlp.make_doc(d) for d in docs]
del docs
if as_spacy_docs:
for dl, doc in zip(doc_labels, tokenized_docs):
doc._.label = dl
_init_doc(doc)
return tokenized_docs
else:
return [[t.text for t in doc] for doc in tokenized_docs]
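# Illustrative usage sketch (not part of the original module); the language code and the raw
# texts below are hypothetical examples and require the corresponding spaCy model to be installed.
def _example_tokenize_usage():
    init_for_language('en')
    corpus = {'doc1': 'This is the first example document.',
              'doc2': 'And this is the second one.'}
    spacydocs = tokenize(corpus)    # list of spaCy Doc objects labelled 'doc1', 'doc2'
    return doc_tokens(spacydocs, to_lists=True)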
#%% functions that operate on lists of string tokens *or* spacy documents
def doc_tokens(docs, to_lists=False):
"""
If `docs` is a list of spaCy documents, return the (potentially filtered) tokens from these documents as list of
string tokens, otherwise return the input list as-is.
:param docs: list of string tokens or spaCy documents
:param to_lists: if `docs` is list of spaCy documents or list of NumPy arrays, convert result to lists
:return: list of string tokens as NumPy arrays (default) or lists (if `to_lists` is True)
"""
require_spacydocs_or_tokens(docs)
if to_lists:
fn = partial(_filtered_doc_tokens, as_list=True)
else:
fn = _filtered_doc_tokens
return list(map(fn, docs))
def tokendocs2spacydocs(docs, vocab=None, doc_labels=None, return_vocab=False):
"""
Create new spaCy documents from token lists in `docs`.
.. note:: spaCy doesn't handle empty tokens (`""`), hence these tokens will not appear in the resulting spaCy
documents if they exist in the input documents.
:param docs: list of document tokens
:param vocab: provide vocabulary to be used when generating spaCy documents; if no vocabulary is given, it will be
generated from `docs`
:param doc_labels: optional list of document labels; if given, must be of same length as `docs`
:param return_vocab: if True, additionally return generated vocabulary as spaCy `Vocab` object
:return: list of spaCy documents or tuple with additional generated vocabulary if `return_vocab` is True
"""
require_tokendocs(docs)
if doc_labels is not None and len(doc_labels) != len(docs):
raise ValueError('`doc_labels` must have the same length as `docs`')
if vocab is None:
vocab = Vocab(strings=list(vocabulary(docs) - {''}))
else:
vocab = Vocab(strings=vocab.tolist() if isinstance(vocab, np.ndarray) else list(vocab))
spacydocs = []
for i, tokdoc in enumerate(docs):
spacydocs.append(spacydoc_from_tokens(tokdoc, vocab=vocab, label='' if doc_labels is None else doc_labels[i]))
if return_vocab:
return spacydocs, vocab
else:
return spacydocs
def doc_lengths(docs):
"""
Return document length (number of tokens in doc.) for each document.
:param docs: list of string tokens or spaCy documents
:return: list of document lengths
"""
require_spacydocs_or_tokens(docs)
return list(map(len, doc_tokens(docs)))
def vocabulary(docs, sort=False):
"""
Return vocabulary, i.e. set of all tokens that occur at least once in at least one of the documents in `docs`.
:param docs: list of string tokens or spaCy documents
:param sort: return as sorted list
:return: either set of token strings or sorted list if `sort` is True
"""
require_spacydocs_or_tokens(docs)
v = set(flatten_list(doc_tokens(docs, to_lists=True)))
if sort:
return sorted(v)
else:
return v
def vocabulary_counts(docs):
"""
Return :class:`collections.Counter()` instance of vocabulary containing counts of occurrences of tokens across
all documents.
:param docs: list of string tokens or spaCy documents
:return: :class:`collections.Counter()` instance of vocabulary containing counts of occurrences of tokens across
all documents
"""
require_spacydocs_or_tokens(docs)
return Counter(flatten_list(doc_tokens(docs)))
def doc_frequencies(docs, proportions=False):
"""
Document frequency per vocabulary token as dict with token to document frequency mapping.
    Document frequency is the number of documents in which a token occurs at least once.
Example with absolute document frequencies:
.. code-block:: text
doc tokens
--- ------
A z, z, w, x
B y, z, y
C z, z, y, z
document frequency df(z) = 3 (occurs in all 3 documents)
df(x) = df(w) = 1 (occur only in A)
df(y) = 2 (occurs in B and C)
...
:param docs: list of string tokens or spaCy documents
:param proportions: if True, normalize by number of documents to obtain proportions
:return: dict mapping token to document frequency
"""
require_spacydocs_or_tokens(docs)
doc_freqs = Counter()
for dtok in docs:
for t in set(_filtered_doc_tokens(dtok, as_list=True)):
doc_freqs[t] += 1
if proportions:
n_docs = len(docs)
return {w: n/n_docs for w, n in doc_freqs.items()}
else:
return doc_freqs
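# Illustrative sketch (not part of the original module) mirroring the docstring example above;
# the token documents are hypothetical.
def _example_doc_frequencies():
    docs = [['z', 'z', 'w', 'x'], ['y', 'z', 'y'], ['z', 'z', 'y', 'z']]
    abs_df = doc_frequencies(docs)                     # {'z': 3, 'y': 2, 'w': 1, 'x': 1}
    rel_df = doc_frequencies(docs, proportions=True)   # {'z': 1.0, 'y': 2/3, 'w': 1/3, 'x': 1/3}
    return abs_df, rel_df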
def ngrams(docs, n, join=True, join_str=' '):
"""
Generate and return n-grams of length `n`.
:param docs: list of string tokens or spaCy documents
:param n: length of n-grams, must be >= 2
:param join: if True, join generated n-grams by string `join_str`
:param join_str: string used for joining
:return: list of n-grams; if `join` is True, the list contains strings of joined n-grams, otherwise the list
contains lists of size `n` in turn containing the strings that make up the n-gram
"""
require_spacydocs_or_tokens(docs)
return [_ngrams_from_tokens(_filtered_doc_tokens(dtok, as_list=True), n=n, join=join, join_str=join_str)
for dtok in docs]
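# Illustrative sketch (not part of the original module): bigrams from a hypothetical token document.
def _example_ngrams():
    docs = [['the', 'quick', 'brown', 'fox']]
    return ngrams(docs, 2)   # -> [['the quick', 'quick brown', 'brown fox']]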
def sparse_dtm(docs, vocab=None):
"""
Create a sparse document-term-matrix (DTM) from a list of tokenized documents `docs`. If `vocab` is None, determine
the vocabulary (unique terms) from `docs`, otherwise take `vocab` which must be a *sorted* list or NumPy array.
If `vocab` is None, the generated sorted vocabulary list is returned as second value, else only a single value is
returned -- the DTM.
:param docs: list of string tokens or spaCy documents
:param vocab: optional *sorted* list / NumPy array of vocabulary (unique terms) in `docs`
:return: either a single value (sparse document-term-matrix) or a tuple with sparse DTM and sorted vocabulary if
none was passed
"""
require_spacydocs_or_tokens(docs)
if vocab is None:
vocab = vocabulary(docs, sort=True)
return_vocab = True
else:
return_vocab = False
tokens = doc_tokens(docs)
alloc_size = sum(len(set(dtok)) for dtok in tokens) # sum of *unique* tokens in each document
dtm = create_sparse_dtm(vocab, tokens, alloc_size, vocab_is_sorted=True)
if return_vocab:
return dtm, vocab
else:
return dtm
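# Illustrative sketch (not part of the original module): the DTM has one row per document and
# one column per (sorted) vocabulary term; the token documents below are hypothetical.
def _example_sparse_dtm():
    docs = [['a', 'b', 'a'], ['b', 'c']]
    dtm, vocab = sparse_dtm(docs)                  # vocab == ['a', 'b', 'c']
    assert dtm.shape == (len(docs), len(vocab))
    return dtm.todense(), vocab                    # rows roughly [[2, 1, 0], [0, 1, 1]]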
def kwic(docs, search_tokens, context_size=2, match_type='exact', ignore_case=False,
glob_method='match', inverse=False, with_metadata=False, as_dict=False, as_datatable=False, non_empty=False,
glue=None, highlight_keyword=None):
"""
Perform keyword-in-context (kwic) search for search pattern(s) `search_tokens`. Returns result as list of KWIC
windows or datatable / dataframe. If you want to filter with KWIC, use
:func:`~tmtoolkit.preprocess.filter_tokens_with_kwic`, which returns results as list of tokenized documents (same
structure as `docs`).
Uses similar search parameters as :func:`~tmtoolkit.preprocess.filter_tokens`.
:param docs: list of string tokens or spaCy documents
:param search_tokens: single string or list of strings that specify the search pattern(s)
:param context_size: either scalar int or tuple (left, right) -- number of surrounding words in keyword context.
if scalar, then it is a symmetric surrounding, otherwise can be asymmetric.
:param match_type: the type of matching that is performed: ``'exact'`` does exact string matching (optionally
ignoring character case if ``ignore_case=True`` is set); ``'regex'`` treats ``search_tokens``
as regular expressions to match the tokens against; ``'glob'`` uses "glob patterns" like
``"politic*"`` which matches for example "politic", "politics" or ""politician" (see
`globre package <https://pypi.org/project/globre/>`_)
:param ignore_case: ignore character case (applies to all three match types)
:param glob_method: if `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`)
:param inverse: inverse the match results for filtering (i.e. *remove* all tokens that match the search
criteria)
:param with_metadata: also return metadata (like POS) along with each token
:param as_dict: if True, return result as dict with document labels mapping to KWIC results
:param as_datatable: return result as data frame with indices "doc" (document label) and "context" (context
ID per document) and optionally "position" (original token position in the document) if
tokens are not glued via `glue` parameter
:param non_empty: if True, only return non-empty result documents
:param glue: if not None, this must be a string which is used to combine all tokens per match to a single string
:param highlight_keyword: if not None, this must be a string which is used to indicate the start and end of the
matched keyword
:return: return either as: (1) list with KWIC results per document, (2) as dict with document labels mapping to
KWIC results when `as_dict` is True or (3) dataframe / datatable when `as_datatable` is True
"""
if as_dict or as_datatable:
require_spacydocs(docs) # because we need the document labels later
else:
require_spacydocs_or_tokens(docs)
if isinstance(context_size, int):
context_size = (context_size, context_size)
else:
require_listlike(context_size)
if highlight_keyword is not None and not isinstance(highlight_keyword, str):
raise ValueError('if `highlight_keyword` is given, it must be of type str')
if glue:
if with_metadata or as_datatable:
raise ValueError('when `glue` is set to True, `with_metadata` and `as_datatable` must be False')
if not isinstance(glue, str):
raise ValueError('if `glue` is given, it must be of type str')
kwic_raw = _build_kwic(docs, search_tokens,
highlight_keyword=highlight_keyword,
with_metadata=with_metadata,
with_window_indices=as_datatable,
context_size=context_size,
match_type=match_type,
ignore_case=ignore_case,
glob_method=glob_method,
inverse=inverse)
if as_dict or as_datatable:
kwic_raw = dict(zip(doc_labels(docs), kwic_raw))
return _finalize_kwic_results(kwic_raw,
non_empty=non_empty,
glue=glue,
as_datatable=as_datatable,
with_metadata=with_metadata)
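# Illustrative sketch (not part of the original module): a simple KWIC query with a one-token
# context window; the token document and the glob pattern are hypothetical.
def _example_kwic():
    docs = [['we', 'discuss', 'politics', 'and', 'economics', 'today']]
    return kwic(docs, 'politic*', context_size=1, match_type='glob')
    # roughly -> [[['discuss', 'politics', 'and']]]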
def kwic_table(docs, search_tokens, context_size=2, match_type='exact', ignore_case=False,
glob_method='match', inverse=False, glue=' ', highlight_keyword='*'):
"""
Shortcut for :func:`~tmtoolkit.preprocess.kwic()` to directly return a data frame table with highlighted keywords
in context.
:param docs: list of string tokens or spaCy documents
:param search_tokens: single string or list of strings that specify the search pattern(s)
:param context_size: either scalar int or tuple (left, right) -- number of surrounding words in keyword context.
if scalar, then it is a symmetric surrounding, otherwise can be asymmetric.
:param match_type: One of: 'exact', 'regex', 'glob'. If 'regex', `search_token` must be RE pattern. If `glob`,
`search_token` must be a "glob" pattern like "hello w*"
(see https://github.com/metagriffin/globre).
:param ignore_case: If True, ignore case for matching.
:param glob_method: If `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`).
:param inverse: Invert the matching results.
:param glue: If not None, this must be a string which is used to combine all tokens per match to a single string
:param highlight_keyword: If not None, this must be a string which is used to indicate the start and end of the
matched keyword.
:return: datatable or pandas DataFrame with columns "doc" (document label), "context" (context ID per document) and
"kwic" containing strings with highlighted keywords in context.
"""
kwic_raw = kwic(docs, search_tokens,
context_size=context_size,
match_type=match_type,
ignore_case=ignore_case,
glob_method=glob_method,
inverse=inverse,
with_metadata=False,
as_dict=True,
as_datatable=False,
non_empty=True,
glue=glue,
highlight_keyword=highlight_keyword)
return _datatable_from_kwic_results(kwic_raw)
def glue_tokens(docs, patterns, glue='_', match_type='exact', ignore_case=False, glob_method='match', inverse=False,
return_glued_tokens=False):
"""
Match N *subsequent* tokens to the N patterns in `patterns` using match options like in
:func:`~tmtoolkit.preprocess.filter_tokens`. Join the matched tokens by glue string `glue`. Replace these tokens
in the documents.
If there is metadata, the respective entries for the joint tokens are set to None.
.. note:: If `docs` is a list of spaCy documents, this modifies the documents in `docs` in place.
:param docs: list of string tokens or spaCy documents
:param patterns: a sequence of search patterns as excepted by :func:`~tmtoolkit.preprocess.filter_tokens`
:param glue: string for joining the subsequent matches
:param match_type: one of: 'exact', 'regex', 'glob'; if 'regex', `search_token` must be RE pattern; if `glob`,
`search_token` must be a "glob" pattern like "hello w*"
(see https://github.com/metagriffin/globre)
:param ignore_case: if True, ignore case for matching
:param glob_method: if `match_type` is 'glob', use this glob method; must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`)
:param inverse: invert the matching results
:param return_glued_tokens: if True, additionally return a set of tokens that were glued
:return: updated documents `docs` if `docs` is a list of spaCy documents or otherwise a list of string token
documents; if `return_glued_tokens` is True, return 2-tuple with additional set of tokens that were glued
"""
is_spacydocs = require_spacydocs_or_tokens(docs)
glued_tokens = set()
if is_spacydocs is not None:
match_opts = {'match_type': match_type, 'ignore_case': ignore_case, 'glob_method': glob_method}
# all documents must be compact before applying "token_glue_subsequent"
if is_spacydocs:
docs = compact_documents(docs)
res = []
for doc in docs:
# no need to use _filtered_doc_tokens() here because tokens are compact already
matches = token_match_subsequent(patterns, _filtered_doc_tokens(doc), **match_opts)
if inverse:
matches = [~m for m in matches]
if is_spacydocs:
new_doc, glued = doc_glue_subsequent(doc, matches, glue=glue, return_glued=True)
else:
new_doc, glued = token_glue_subsequent(doc, matches, glue=glue, return_glued=True)
res.append(new_doc)
glued_tokens.update(glued)
else:
res = []
if return_glued_tokens:
return res, glued_tokens
else:
return res
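# Illustrative sketch (not part of the original module): glue the subsequent token pair
# ("New", "York") into a single token; the example document is hypothetical.
def _example_glue_tokens():
    docs = [['we', 'flew', 'to', 'New', 'York', 'yesterday']]
    glued_docs, glued = glue_tokens(docs, ['New', 'York'], glue='_', return_glued_tokens=True)
    # glued_docs roughly [['we', 'flew', 'to', 'New_York', 'yesterday']], glued == {'New_York'}
    return glued_docs, glued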
def expand_compounds(docs, split_chars=('-',), split_on_len=2, split_on_casechange=False):
"""
Expand all compound tokens in documents `docs`, e.g. splitting token "US-Student" into two tokens "US" and
"Student".
:param docs: list of string tokens or spaCy documents
:param split_chars: characters to split on
:param split_on_len: minimum length of a result token when considering splitting (e.g. when ``split_on_len=2``
"e-mail" would not be split into "e" and "mail")
:param split_on_casechange: use case change to split tokens, e.g. "CamelCase" would become "Camel", "Case"
:return: list of string tokens or spaCy documents, depending on `docs`
"""
is_spacydocs = require_spacydocs_or_tokens(docs)
if is_spacydocs is None:
return []
exp_comp = partial(expand_compound_token, split_chars=split_chars, split_on_len=split_on_len,
split_on_casechange=split_on_casechange)
list_creator = list if is_spacydocs else flatten_list
exptoks = [list_creator(map(exp_comp, _filtered_doc_tokens(doc))) for doc in docs]
assert len(exptoks) == len(docs)
if not is_spacydocs:
if isinstance(next(iter(docs)), np.ndarray):
return [np.array(d) if d else empty_chararray() for d in exptoks]
else:
return exptoks
new_docs = []
for doc_exptok, doc in zip(exptoks, docs):
words = []
tokens = []
spaces = []
lemmata = []
for exptok, t, oldtok in zip(doc_exptok, doc, _filtered_doc_tokens(doc)):
n_exptok = len(exptok)
spaces.extend([''] * (n_exptok-1) + [t.whitespace_])
if n_exptok > 1:
lemmata.extend(exptok)
words.extend(exptok)
tokens.extend(exptok)
else:
lemmata.append(t.lemma_)
words.append(t.text)
tokens.append(oldtok)
new_doc = Doc(doc.vocab, words=words, spaces=spaces)
new_doc._.label = doc._.label
assert len(new_doc) == len(lemmata)
_init_doc(new_doc, tokens)
for t, lem in zip(new_doc, lemmata):
t.lemma_ = lem
new_docs.append(new_doc)
return new_docs
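# Illustrative sketch (not part of the original module): compound splitting on "-" as described
# in the docstring above; the example document is hypothetical.
def _example_expand_compounds():
    docs = [['US-Student', 'protest']]
    return expand_compounds(docs)   # -> [['US', 'Student', 'protest']]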
def transform(docs, func, **kwargs):
"""
Apply `func` to each token in each document of `docs` and return the result.
:param docs: list of string tokens or spaCy documents
:param func: function to apply to each token; should accept a string as first arg. and optional `kwargs`
:param kwargs: keyword arguments passed to `func`
:return: list of string tokens or spaCy documents, depending on `docs`
"""
if not callable(func):
raise ValueError('`func` must be callable')
is_spacydocs = require_spacydocs_or_tokens(docs)
if is_spacydocs is None:
return []
is_arrays = not is_spacydocs and isinstance(next(iter(docs)), np.ndarray)
if kwargs:
func_wrapper = lambda t: func(t, **kwargs)
else:
func_wrapper = func
if is_spacydocs:
labels = doc_labels(docs)
docs = doc_tokens(docs)
res = [list(map(func_wrapper, dtok)) for dtok in docs]
if is_spacydocs:
return tokendocs2spacydocs(res, doc_labels=labels)
elif is_arrays:
return [np.array(d) if d else empty_chararray() for d in res]
else:
return res
def to_lowercase(docs):
"""
Apply lowercase transformation to each document.
:param docs: list of string tokens or spaCy documents
:return: list of string tokens or spaCy documents, depending on `docs`
"""
return transform(docs, str.lower)
def remove_chars(docs, chars):
"""
Remove all characters listed in `chars` from all tokens.
:param docs: list of string tokens or spaCy documents
:param chars: list of characters to remove
:return: list of string tokens or spaCy documents, depending on `docs`
"""
if not chars:
raise ValueError('`chars` must be a non-empty sequence')
is_spacydocs = require_spacydocs_or_tokens(docs)
if is_spacydocs is None:
return []
is_arrays = not is_spacydocs and isinstance(next(iter(docs)), np.ndarray)
if is_spacydocs:
labels = doc_labels(docs)
docs = doc_tokens(docs)
del_chars = str.maketrans('', '', ''.join(chars))
res = [[t.translate(del_chars) for t in dtok] for dtok in docs]
if is_spacydocs:
return tokendocs2spacydocs(res, doc_labels=labels)
elif is_arrays:
return [np.array(d) if d else empty_chararray() for d in res]
else:
return res
def clean_tokens(docs, remove_punct=True, remove_stopwords=True, remove_empty=True,
remove_shorter_than=None, remove_longer_than=None, remove_numbers=False,
nlp_instance=None, language=None):
"""
    Apply several token cleaning steps to documents `docs`, depending on the given parameters.
:param docs: list of string tokens or spaCy documents
:param remove_punct: if True, remove all tokens marked as ``is_punct`` by spaCy if `docs` are spaCy documents,
otherwise remove tokens that match the characters listed in ``string.punctuation``;
if arg is a list, tuple or set, remove all tokens listed in this arg from the
documents; if False do not apply punctuation token removal
:param remove_stopwords: if True, remove stop words for the given `language` as loaded via
`~tmtoolkit.preprocess.load_stopwords` ; if arg is a list, tuple or set, remove all tokens
listed in this arg from the documents; if False do not apply stop word token removal
:param remove_empty: if True, remove empty strings ``""`` from documents
:param remove_shorter_than: if given a positive number, remove tokens that are shorter than this number
:param remove_longer_than: if given a positive number, remove tokens that are longer than this number
:param remove_numbers: if True, remove all tokens that are deemed numeric by :func:`np.char.isnumeric`
:param nlp_instance: spaCy nlp instance
:param language: language for stop word removal
:return: list of string tokens or spaCy documents, depending on `docs`
"""
if remove_shorter_than is not None and remove_shorter_than < 0:
raise ValueError('`remove_shorter_than` must be >= 0')
if remove_longer_than is not None and remove_longer_than < 0:
raise ValueError('`remove_longer_than` must be >= 0')
is_spacydocs = require_spacydocs_or_tokens(docs)
if is_spacydocs is None:
return []
is_arrays = not is_spacydocs and isinstance(next(iter(docs)), np.ndarray)
# add empty token to list of tokens to remove
tokens_to_remove = [''] if remove_empty else []
# add punctuation characters to list of tokens to remove
if isinstance(remove_punct, (tuple, list, set)):
tokens_to_remove.extend(remove_punct)
# add stopwords to list of tokens to remove
if remove_stopwords is True:
# default stopword list
tokens_to_remove.extend(load_stopwords(language or _current_nlp(nlp_instance).lang))
elif isinstance(remove_stopwords, (tuple, list, set)):
tokens_to_remove.extend(remove_stopwords)
# the "remove masks" list holds a binary array for each document where `True` signals a token to be removed
docs_as_tokens = doc_tokens(docs)
remove_masks = [np.repeat(False, len(doc)) for doc in docs_as_tokens]
# update remove mask for punctuation
if remove_punct is True:
if is_spacydocs:
remove_masks = [mask | doc.to_array('is_punct')[doc.user_data['mask']].astype(np.bool_)
for mask, doc in zip(remove_masks, docs)]
else:
tokens_to_remove.extend(list(string.punctuation))
# update remove mask for tokens shorter/longer than a certain number of characters
if remove_shorter_than is not None or remove_longer_than is not None:
token_lengths = [np.fromiter(map(len, doc), np.int, len(doc)) for doc in docs_as_tokens]
if remove_shorter_than is not None:
remove_masks = [mask | (n < remove_shorter_than) for mask, n in zip(remove_masks, token_lengths)]
if remove_longer_than is not None:
remove_masks = [mask | (n > remove_longer_than) for mask, n in zip(remove_masks, token_lengths)]
# update remove mask for numeric tokens
if remove_numbers:
if is_spacydocs:
remove_masks = [mask | doc.to_array('like_num')[doc.user_data['mask']].astype(np.bool_)
for mask, doc in zip(remove_masks, docs)]
elif is_arrays:
remove_masks = [mask | np.char.isnumeric(doc)
for mask, doc in zip(remove_masks, docs_as_tokens)]
else:
remove_masks = [mask | np.array([t.isnumeric() for t in doc], dtype=np.bool_)
for mask, doc in zip(remove_masks, docs_as_tokens)]
# update remove mask for general list of tokens to be removed
if tokens_to_remove:
tokens_to_remove = set(tokens_to_remove)
# this is actually much faster than using np.isin:
remove_masks = [mask | np.array([t in tokens_to_remove for t in doc], dtype=bool)
for mask, doc in zip(remove_masks, docs_as_tokens)]
# apply the mask
return _apply_matches_array(docs, remove_masks, invert=True)
def filter_tokens_by_mask(docs, mask, inverse=False):
"""
Filter tokens in `docs` according to a binary mask specified by `mask`.
:param docs: list of string tokens or spaCy documents
:param mask: a list containing a mask list for each document in `docs`; each mask list contains boolean values for
each token in that document, where `True` means keeping that token and `False` means removing it;
:param inverse: inverse the mask for filtering, i.e. keep all tokens with a mask set to `False` and remove all those
with `True`
:return: list of string tokens or spaCy documents, depending on `docs`
"""
require_spacydocs_or_tokens(docs)
if len(mask) > 0 and not isinstance(mask[0], np.ndarray):
mask = list(map(lambda x: np.array(x, dtype=np.bool), mask))
return _apply_matches_array(docs, mask, invert=inverse)
def remove_tokens_by_mask(docs, mask):
"""
Same as :func:`~tmtoolkit.preprocess.filter_tokens_by_mask` but with ``inverse=True``.
.. seealso:: :func:`~tmtoolkit.preprocess.filter_tokens_by_mask`
"""
return filter_tokens_by_mask(docs, mask, inverse=True)
def filter_tokens(docs, search_tokens, by_meta=None, match_type='exact', ignore_case=False,
glob_method='match', inverse=False):
"""
Filter tokens in `docs` according to search pattern(s) `search_tokens` and several matching options. Only those
tokens are retained that match the search criteria unless you set ``inverse=True``, which will *remove* all tokens
that match the search criteria (which is the same as calling :func:`~tmtoolkit.preprocess.remove_tokens`).
.. seealso:: :func:`~tmtoolkit.preprocess.remove_tokens` and :func:`~tmtoolkit.preprocess.token_match`
:param docs: list of string tokens or spaCy documents
:param search_tokens: typically a single string or non-empty list of strings that specify the search pattern(s);
when matching against meta data via `by_meta`, may also be of any other type
:param by_meta: if not None, this should be a string of a token meta data attribute; this meta data will then be
used for matching instead of the tokens in `docs`
:param match_type: the type of matching that is performed: ``'exact'`` does exact string matching (optionally
ignoring character case if ``ignore_case=True`` is set); ``'regex'`` treats ``search_tokens``
as regular expressions to match the tokens against; ``'glob'`` uses "glob patterns" like
``"politic*"`` which matches for example "politic", "politics" or ""politician" (see
`globre package <https://pypi.org/project/globre/>`_)
:param ignore_case: ignore character case (applies to all three match types)
:param glob_method: if `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`)
:param inverse: inverse the match results for filtering (i.e. *remove* all tokens that match the search
criteria)
:return: list of string tokens or spaCy documents, depending on `docs`
"""
require_spacydocs_or_tokens(docs)
matches = _token_pattern_matches(_match_against(docs, by_meta), search_tokens, match_type=match_type,
ignore_case=ignore_case, glob_method=glob_method)
return filter_tokens_by_mask(docs, matches, inverse=inverse)
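# Illustrative sketch (not part of the original module): keep only tokens matching a glob
# pattern; the example document is hypothetical.
def _example_filter_tokens():
    docs = [['politics', 'and', 'politicians', 'in', 'print']]
    return filter_tokens(docs, 'politic*', match_type='glob')
    # roughly -> [['politics', 'politicians']]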
def remove_tokens(docs, search_tokens, by_meta=None, match_type='exact', ignore_case=False, glob_method='match'):
"""
Same as :func:`~tmtoolkit.preprocess.filter_tokens` but with ``inverse=True``.
.. seealso:: :func:`~tmtoolkit.preprocess.filter_tokens` and :func:`~tmtoolkit.preprocess.token_match`.
"""
return filter_tokens(docs, search_tokens=search_tokens, by_meta=by_meta, match_type=match_type,
ignore_case=ignore_case, glob_method=glob_method, inverse=True)
def filter_tokens_with_kwic(docs, search_tokens, context_size=2, match_type='exact', ignore_case=False,
glob_method='match', inverse=False):
"""
Filter tokens in `docs` according to Keywords-in-Context (KWIC) context window of size `context_size` around
`search_tokens`. Works similar to :func:`~tmtoolkit.preprocess.kwic`, but returns result as list of tokenized
documents, i.e. in the same structure as `docs` whereas :func:`~tmtoolkit.preprocess.kwic` returns result as
list of *KWIC windows* into `docs`.
.. seealso:: :func:`~tmtoolkit.preprocess.kwic`
:param docs: list of string tokens or spaCy documents
    :param search_tokens: single string or non-empty list of strings that specify the search pattern(s)
:param context_size: either scalar int or tuple (left, right) -- number of surrounding words in keyword context.
if scalar, then it is a symmetric surrounding, otherwise can be asymmetric.
:param match_type: the type of matching that is performed: ``'exact'`` does exact string matching (optionally
ignoring character case if ``ignore_case=True`` is set); ``'regex'`` treats ``search_tokens``
as regular expressions to match the tokens against; ``'glob'`` uses "glob patterns" like
``"politic*"`` which matches for example "politic", "politics" or ""politician" (see
`globre package <https://pypi.org/project/globre/>`_)
:param ignore_case: ignore character case (applies to all three match types)
:param glob_method: if `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`)
:param inverse: inverse the match results for filtering (i.e. *remove* all tokens that match the search
criteria)
:return: list of string tokens or spaCy documents, depending on `docs`
"""
require_spacydocs_or_tokens(docs)
if isinstance(context_size, int):
context_size = (context_size, context_size)
else:
require_listlike(context_size)
matches = _build_kwic(docs, search_tokens,
context_size=context_size,
match_type=match_type,
ignore_case=ignore_case,
glob_method=glob_method,
inverse=inverse,
only_token_masks=True)
return filter_tokens_by_mask(docs, matches)
def remove_tokens_by_doc_frequency(docs, which, df_threshold, absolute=False, return_blacklist=False,
return_mask=False):
"""
Remove tokens according to their document frequency.
:param docs: list of string tokens or spaCy documents
:param which: which threshold comparison to use: either ``'common'``, ``'>'``, ``'>='`` which means that tokens
with higher document freq. than (or equal to) `df_threshold` will be removed;
or ``'uncommon'``, ``'<'``, ``'<='`` which means that tokens with lower document freq. than
(or equal to) `df_threshold` will be removed
:param df_threshold: document frequency threshold value
    :param absolute: if True, interpret `df_threshold` as absolute document frequency (i.e. the number of documents
                     a token must occur in), otherwise interpret it as relative document frequency in range [0, 1]
                     (proportion of documents a token must occur in)
:param return_blacklist: if True return a list of tokens that should be removed instead of the filtered tokens
:param return_mask: if True return a list of token masks where each occurrence of True signals a token to
be removed
:return: list of string tokens or spaCy documents, depending on `docs`
"""
require_spacydocs_or_tokens(docs)
which_opts = {'common', '>', '>=', 'uncommon', '<', '<='}
if which not in which_opts:
raise ValueError('`which` must be one of: %s' % ', '.join(which_opts))
n_docs = len(docs)
if absolute:
if not 0 <= df_threshold <= n_docs:
raise ValueError('`df_threshold` must be in range [0, %d]' % n_docs)
else:
if not 0 <= df_threshold <= 1:
raise ValueError('`df_threshold` must be in range [0, 1]')
if which in ('common', '>='):
comp = operator.ge
elif which == '>':
comp = operator.gt
elif which == '<':
comp = operator.lt
else:
comp = operator.le
toks = doc_tokens(docs, to_lists=True)
doc_freqs = doc_frequencies(toks, proportions=not absolute)
mask = [[comp(doc_freqs[t], df_threshold) for t in dtok] for dtok in toks]
if return_blacklist:
blacklist = set(t for t, f in doc_freqs.items() if comp(f, df_threshold))
if return_mask:
return blacklist, mask
if return_mask:
return mask
return remove_tokens_by_mask(docs, mask)
def remove_common_tokens(docs, df_threshold=0.95, absolute=False):
"""
Shortcut for :func:`~tmtoolkit.preprocess.remove_tokens_by_doc_frequency` for removing tokens *above* a certain
document frequency.
:param docs: list of string tokens or spaCy documents
:param df_threshold: document frequency threshold value
:param absolute: if True, use absolute document frequency (i.e. number of times token X occurs at least once
in a document), otherwise use relative document frequency (normalized by number of documents)
:return: list of string tokens or spaCy documents, depending on `docs`
"""
return remove_tokens_by_doc_frequency(docs, 'common', df_threshold=df_threshold, absolute=absolute)
def remove_uncommon_tokens(docs, df_threshold=0.05, absolute=False):
"""
Shortcut for :func:`~tmtoolkit.preprocess.remove_tokens_by_doc_frequency` for removing tokens *below* a certain
document frequency.
:param docs: list of string tokens or spaCy documents
:param df_threshold: document frequency threshold value
:param absolute: if True, use absolute document frequency (i.e. number of times token X occurs at least once
in a document), otherwise use relative document frequency (normalized by number of documents)
:return: list of string tokens or spaCy documents, depending on `docs`
"""
return remove_tokens_by_doc_frequency(docs, 'uncommon', df_threshold=df_threshold, absolute=absolute)
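# A small, hedged example of document-frequency based removal using the three functions above.
# The toy documents are made up; with `absolute=True` the threshold counts documents, otherwise
# it is a proportion in [0, 1].
def _example_remove_by_doc_frequency():
    docs = [['a', 'b', 'c'], ['a', 'b'], ['a', 'x']]
    # drop tokens that occur in at least 2 of the 3 documents
    filtered = remove_tokens_by_doc_frequency(docs, 'common', df_threshold=2, absolute=True)
    # the shortcuts use relative document frequencies by default
    no_common = remove_common_tokens(docs, df_threshold=0.66)
    no_rare = remove_uncommon_tokens(docs, df_threshold=0.34)
    return filtered, no_common, no_rare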
def filter_documents(docs, search_tokens, by_meta=None, matches_threshold=1,
match_type='exact', ignore_case=False, glob_method='match', inverse_result=False,
inverse_matches=False):
"""
This function is similar to :func:`~tmtoolkit.preprocess.filter_tokens` but applies at document level. For each
document, the number of matches is counted. If it is at least `matches_threshold` the document is retained,
otherwise removed. If `inverse_result` is True, then documents that meet the threshold are *removed*.
.. seealso:: :func:`~tmtoolkit.preprocess.remove_documents`
:param docs: list of string tokens or spaCy documents
:param search_tokens: typically a single string or non-empty list of strings that specify the search pattern(s);
when matching against meta data via `by_meta`, may also be of any other type
:param by_meta: if not None, this should be a string of a token meta data attribute; this meta data will then be
used for matching instead of the tokens in `docs`
:param matches_threshold: the minimum number of matches required per document
:param match_type: the type of matching that is performed: ``'exact'`` does exact string matching (optionally
ignoring character case if ``ignore_case=True`` is set); ``'regex'`` treats ``search_tokens``
as regular expressions to match the tokens against; ``'glob'`` uses "glob patterns" like
``"politic*"`` which matches for example "politic", "politics" or ""politician" (see
`globre package <https://pypi.org/project/globre/>`_)
:param ignore_case: ignore character case (applies to all three match types)
:param glob_method: if `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`)
:param inverse_result: inverse the threshold comparison result
:param inverse_matches: inverse the match results for filtering
:return: list of string tokens or spaCy documents, depending on `docs`
"""
require_spacydocs_or_tokens(docs)
matches = _token_pattern_matches(_match_against(docs, by_meta), search_tokens, match_type=match_type,
ignore_case=ignore_case, glob_method=glob_method)
if inverse_matches:
matches = [~m for m in matches]
new_docs = []
for i, (doc, n_matches) in enumerate(zip(docs, map(np.sum, matches))):
thresh_met = n_matches >= matches_threshold
if inverse_result:
thresh_met = not thresh_met
if thresh_met:
new_docs.append(doc)
return new_docs
def remove_documents(docs, search_tokens, by_meta=None, matches_threshold=1,
match_type='exact', ignore_case=False, glob_method='match', inverse_matches=False):
"""
Same as :func:`~tmtoolkit.preprocess.filter_documents` but with ``inverse=True``.
.. seealso:: :func:`~tmtoolkit.preprocess.filter_documents`
"""
return filter_documents(docs, search_tokens, by_meta=by_meta,
matches_threshold=matches_threshold, match_type=match_type, ignore_case=ignore_case,
glob_method=glob_method, inverse_matches=inverse_matches, inverse_result=True)
def filter_documents_by_name(docs, name_patterns, labels=None, match_type='exact', ignore_case=False,
glob_method='match', inverse=False):
"""
    Filter documents by their name (i.e. document label). Keep all documents whose name matches `name_patterns`
according to additional matching options. If `inverse` is True, drop all those documents whose name matches,
which is the same as calling :func:`~tmtoolkit.preprocess.remove_documents_by_name`.
:param docs: list of string tokens or spaCy documents
    :param name_patterns: either a single string pattern or a non-empty list of string patterns that the document
                          labels are matched against
:param labels: if `docs` is not a list of spaCy documents, you must pass the document labels as list of strings
:param match_type: the type of matching that is performed: ``'exact'`` does exact string matching (optionally
ignoring character case if ``ignore_case=True`` is set); ``'regex'`` treats ``search_tokens``
as regular expressions to match the tokens against; ``'glob'`` uses "glob patterns" like
``"politic*"`` which matches for example "politic", "politics" or ""politician" (see
`globre package <https://pypi.org/project/globre/>`_)
:param ignore_case: ignore character case (applies to all three match types)
:param glob_method: if `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
behavior as Python's :func:`re.match` or :func:`re.search`)
:param inverse: invert the matching results
:return: list of string tokens or spaCy documents, depending on `docs`
"""
is_spacydocs = require_spacydocs_or_tokens(docs)
if is_spacydocs is None:
return []
if isinstance(name_patterns, str):
name_patterns = [name_patterns]
else:
require_listlike(name_patterns)
if not name_patterns:
raise ValueError('`name_patterns` must not be empty')
if is_spacydocs and labels is None:
labels = doc_labels(docs)
elif not is_spacydocs and labels is None:
raise ValueError('if not passing a list of spaCy documents as `docs`, you must pass document labels via '
'`labels`')
if len(labels) != len(docs):
raise ValueError('number of document labels must match number of documents')
matches = None
for pat in name_patterns:
pat_match = token_match(pat, labels, match_type=match_type, ignore_case=ignore_case,
glob_method=glob_method)
if matches is None:
matches = pat_match
else:
matches |= pat_match
assert matches is not None
assert len(labels) == len(matches)
if inverse:
matches = ~matches
return [doc for doc, m in zip(docs, matches) if m]
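# A hedged usage sketch for filtering by document label. Since plain token lists carry no
# labels, they are passed explicitly via `labels`; the names below are illustrative only.
def _example_filter_documents_by_name():
    docs = [['some', 'tokens'], ['more', 'tokens']]
    labels = ['report-2019', 'notes-2020']
    # keep only documents whose label starts with "report"
    return filter_documents_by_name(docs, 'report*', labels=labels, match_type='glob')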
def remove_documents_by_name(docs, name_patterns, labels=None, match_type='exact', ignore_case=False,
glob_method='match'):
"""
Same as :func:`~tmtoolkit.preprocess.filter_documents_by_name` but with ``inverse=True``.
.. seealso:: :func:`~tmtoolkit.preprocess.filter_documents_by_name`
"""
return filter_documents_by_name(docs, name_patterns, labels=labels, match_type=match_type,
ignore_case=ignore_case, glob_method=glob_method, inverse=True)
#%% functions that operate *only* on lists of spacy documents
def doc_labels(docs):
"""
Return list of document labels that are assigned to spaCy documents `docs`.
:param docs: list of spaCy documents
:return: list of document labels
"""
require_spacydocs(docs)
return [d._.label for d in docs]
def compact_documents(docs):
"""
Compact documents `docs` by recreating new documents using the previously applied filters.
:param docs: list of spaCy documents
:return: list with compact spaCy documents
"""
require_spacydocs(docs)
return _apply_matches_array(docs, compact=True)
def pos_tag(docs, tagger=None, nlp_instance=None):
"""
Apply Part-of-Speech (POS) tagging to all documents.
The meanings of the POS tags are described in the
`spaCy documentation <https://spacy.io/api/annotation#pos-tagging>`_.
.. note:: This function only applies POS tagging to the documents but doesn't retrieve the tags. If you want to
retrieve the tags, you may use :func:`~tmtoolkit.preprocess.pos_tags`.
.. note:: This function modifies the documents in `docs` in place and adds/modifies a `pos_` attribute in each
token.
:param docs: list of spaCy documents
:param tagger: POS tagger instance to use; by default, use the tagger for the currently loaded spaCy nlp instance
:param nlp_instance: spaCy nlp instance
:return: input spaCy documents `docs` with in-place modified documents
"""
require_spacydocs(docs)
tagger = tagger or _current_nlp(nlp_instance, pipeline_component='tagger')
for doc in docs:
# this will be done for all tokens in the document, also for masked tokens
# unless "compact" is called before
tagger(doc)
return docs
def pos_tags(docs, tag_attrib='pos_', tagger=None, nlp_instance=None):
"""
Return Part-of-Speech (POS) tags of `docs`. If POS tagging was not applied to `docs` yet, this function
runs :func:`~tmtoolkit.preprocess.pos_tag` first.
:param docs: list of spaCy documents
:param tag_attrib: spaCy document tag attribute to fetch the POS tag; ``"pos_"`` and ``"pos"`` give coarse POS
tags as string or integer tags respectively, ``"tag_"`` and ``"tag"`` give fine grained POS
tags as string or integer tags
:param tagger: POS tagger instance to use; by default, use the tagger for the currently loaded spaCy nlp instance
:param nlp_instance: spaCy nlp instance
:return: POS tags of `docs` as list of strings or integers depending on `tag_attrib`
"""
require_spacydocs(docs)
if tag_attrib not in {'pos', 'pos_', 'tag', 'tag_'}:
raise ValueError("`tag_attrib` must be 'pos', 'pos_', 'tag' or 'tag_'")
if not docs:
return []
first_doc = next(iter(docs))
if not getattr(first_doc, tag_attrib, False):
pos_tag(docs, tagger=tagger, nlp_instance=nlp_instance)
return _get_docs_tokenattrs(docs, tag_attrib, custom_attr=False)
def filter_for_pos(docs, required_pos, simplify_pos=True, pos_attrib='pos_', tagset='ud', inverse=False):
"""
Filter tokens for a specific POS tag (if `required_pos` is a string) or several POS tags (if `required_pos`
is a list/tuple/set of strings). The POS tag depends on the tagset used during tagging. See
https://spacy.io/api/annotation#pos-tagging for a general overview on POS tags in SpaCy and refer to the
documentation of your language model for specific tags.
If `simplify_pos` is True, then the tags are matched to the following simplified forms:
* ``'N'`` for nouns
* ``'V'`` for verbs
* ``'ADJ'`` for adjectives
* ``'ADV'`` for adverbs
* ``None`` for all other
:param docs: list of spaCy documents
:param required_pos: single string or list of strings with POS tag(s) used for filtering
:param simplify_pos: before matching simplify POS tags in documents to forms shown above
:param pos_attrib: token attribute name for POS tags
:param tagset: POS tagset used while tagging; necessary for simplifying POS tags when `simplify_pos` is True
:param inverse: inverse the matching results, i.e. *remove* tokens that match the POS tag
:return: filtered list of spaCy documents
"""
require_spacydocs(docs)
docs_pos = _get_docs_tokenattrs(docs, pos_attrib, custom_attr=False)
if required_pos is None or not isinstance(required_pos, (tuple, list)):
required_pos = [required_pos]
if simplify_pos:
simplify_fn = np.vectorize(lambda x: simplified_pos(x, tagset=tagset))
else:
simplify_fn = np.vectorize(lambda x: x) # identity function
matches = [np.isin(simplify_fn(dpos), required_pos)
if len(dpos) > 0
else np.array([], dtype=bool)
for dpos in docs_pos]
return _apply_matches_array(docs, matches, invert=inverse)
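# A hedged sketch of POS-based filtering: it assumes `docs` are spaCy documents produced by
# this package's tokenization. POS tagging is applied in place first, then only nouns and
# verbs (simplified tags) are kept.
def _example_filter_for_pos(docs):
    pos_tag(docs)
    return filter_for_pos(docs, ['N', 'V'])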
def lemmatize(docs, lemma_attrib='lemma_'):
"""
    Lemmatize documents, i.e. return the lemma (base form) of each token in `docs`.
    :param docs: list of spaCy documents
    :param lemma_attrib: spaCy token attribute to fetch the lemmata; ``"lemma_"`` gives lemmata as strings,
                         ``"lemma"`` gives lemmata as integer token IDs
:return: list of string lists with lemmata for each document
"""
require_spacydocs(docs)
docs_lemmata = _get_docs_tokenattrs(docs, lemma_attrib, custom_attr=False)
# SpaCy lemmata sometimes contain special markers like -PRON- instead of the lemma;
# fix this here by resorting to the original token
toks = doc_tokens(docs, to_lists=True)
new_docs_lemmata = []
assert len(docs_lemmata) == len(toks)
for doc_tok, doc_lem in zip(toks, docs_lemmata):
assert len(doc_tok) == len(doc_lem)
new_docs_lemmata.append([t if l.startswith('-') and l.endswith('-') else l
for t, l in zip(doc_tok, doc_lem)])
return new_docs_lemmata
def tokens2ids(docs):
"""
Convert a list of spaCy documents `docs` to a list of numeric token ID arrays. The IDs correspond to the current
spaCy vocabulary.
.. seealso:: :func:`~tmtoolkit.preprocess.ids2tokens` which reverses this operation.
:param docs: list of spaCy documents
:return: list of token ID arrays
"""
require_spacydocs(docs)
return [d.to_array('ORTH') for d in docs]
def ids2tokens(vocab, tokids):
"""
Convert list of numeric token ID arrays `tokids` to a character token array with the help of the spaCy vocabulary
`vocab`. Returns result as list of spaCy documents.
.. seealso:: :func:`~tmtoolkit.preprocess.tokens2ids` which reverses this operation.
:param vocab: spaCy vocabulary
:param tokids: list of numeric token ID arrays as from :func:`~tmtoolkit.preprocess.tokens2ids`
:return: list of spaCy documents
"""
return [Doc(vocab, words=[vocab[t].orth_ for t in ids]) for ids in tokids]
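# A hedged sketch of a round trip through numeric token IDs; it assumes all documents in
# `docs` share the spaCy vocabulary `vocab`.
def _example_token_id_roundtrip(docs, vocab):
    tokids = tokens2ids(docs)
    return ids2tokens(vocab, tokids)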
#%% functions that operate on a single spacy document
def spacydoc_from_tokens(tokens, vocab=None, spaces=None, lemmata=None, label=None):
"""
Create a new spaCy ``Doc`` document with tokens `tokens`.
:param tokens: list, tuple or NumPy array of string tokens
:param vocab: list, tuple, set, NumPy array or spaCy ``Vocab`` object with vocabulary; if None, vocabulary will be
generated from `tokens`
:param spaces: list, tuple or NumPy array of whitespace for each token
:param lemmata: list, tuple or NumPy array of string lemmata for each token
:param label: document label
:return: spaCy ``Doc`` document
"""
require_types(tokens, (tuple, list, np.ndarray), error_msg='the argument must be a list, tuple or NumPy array')
tokens = [t for t in (tokens.tolist() if isinstance(tokens, np.ndarray) else tokens)
if t] # spaCy doesn't accept empty tokens and also no NumPy "np.str_" type tokens
if vocab is None:
vocab = Vocab(strings=list(vocabulary([tokens])))
elif not isinstance(vocab, Vocab):
vocab = Vocab(strings=vocab.tolist() if isinstance(vocab, np.ndarray) else list(vocab))
if lemmata is not None and len(lemmata) != len(tokens):
raise ValueError('`lemmata` must have the same length as `tokens`')
new_doc = Doc(vocab, words=tokens, spaces=spaces)
assert len(new_doc) == len(tokens)
if label is not None:
new_doc._.label = label
_init_doc(new_doc, tokens)
if lemmata is not None:
lemmata = lemmata.tolist() if isinstance(lemmata, np.ndarray) else lemmata
for t, lem in zip(new_doc, lemmata):
t.lemma_ = lem
return new_doc
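# A minimal sketch showing how to build a spaCy document from plain string tokens; the
# vocabulary is generated from the tokens themselves and the label is arbitrary.
def _example_spacydoc_from_tokens():
    return spacydoc_from_tokens(['a', 'small', 'example'], label='example-doc')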
def doc_glue_subsequent(doc, matches, glue='_', return_glued=False):
"""
Select subsequent tokens in `doc` as defined by list of indices `matches` (e.g. output of
:func:`~tmtoolkit.preprocess.token_match_subsequent`) and join those by string `glue`. Return `doc` again.
    .. note:: This function modifies `doc` in place. All token attributes will be reset to default values besides
              ``"lemma_"``, which is set to the joint token string.
.. warning:: Only works correctly when matches contains indices of *subsequent* tokens.
Example::
# doc is a SpaCy document with tokens ['a', 'b', 'c', 'd', 'd', 'a', 'b', 'c']
        doc_glue_subsequent(doc, [np.array([1, 2]), np.array([6, 7])])
# doc now contains tokens ['a', 'b_c', 'd', 'd', 'a', 'b_c']
.. seealso:: :func:`~tmtoolkit.preprocess.token_match_subsequent`
:param doc: a SpaCy document which must be compact (i.e. no filter mask set)
:param matches: list of NumPy arrays with *subsequent* indices into `tokens` (e.g. output of
:func:`~tmtoolkit.preprocess.token_match_subsequent`)
:param glue: string for joining the subsequent matches or None if no joint tokens but an empty string should be set
as token value and lemma
:param return_glued: if yes, return also a list of joint tokens
    :return: either two-tuple or input `doc`; if `return_glued` is True, return a two-tuple with 1) `doc` and 2) a list
             of joint tokens; if `return_glued` is False, only return 1)
"""
require_listlike(matches)
if return_glued and glue is None:
raise ValueError('if `glue` is None, `return_glued` must be False')
if not doc.user_data['mask'].all():
raise ValueError('`doc` must be compact, i.e. no filter mask should be set (all elements in '
'`doc.user_data["mask"]` must be True)')
n_tok = len(doc)
if n_tok == 0:
if return_glued:
return doc, []
else:
return doc
# map span start index to end index
glued = []
    # within this context, `doc` doesn't change (i.e. we can use the same indices into `doc` throughout the for loop,
    # even when we merge tokens)
del_tokens_indices = []
with doc.retokenize() as retok:
# we will need to update doc.user_data['tokens'], which is a NumPy character array;
# a NumPy char array has a maximum element size and we will need to update that to the
# maximum string length in `chararray_updates` by using `widen_chararray()` below
chararray_updates = {}
for m in matches:
assert len(m) >= 2
begin, end = m[0], m[-1]
span = doc[begin:end+1]
merged = '' if glue is None else glue.join(doc.user_data['tokens'][begin:end+1])
assert begin not in chararray_updates.keys()
chararray_updates[begin] = merged
del_tokens_indices.extend(list(range(begin+1, end+1)))
attrs = {
'LEMMA': merged,
'WHITESPACE': doc[end].whitespace_
}
retok.merge(span, attrs=attrs)
if return_glued:
glued.append(merged)
if chararray_updates:
new_maxsize = max(map(len, chararray_updates.values()))
doc.user_data['tokens'] = widen_chararray(doc.user_data['tokens'], new_maxsize)
for begin, merged in chararray_updates.items():
doc.user_data['tokens'][begin] = merged
    doc.user_data['tokens'] = np.delete(doc.user_data['tokens'], del_tokens_indices)
"""
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyvisa as visa
import time
import sys, traceback
import re as regex
import numpy as np
class VisaInstrument:
def __init__(self, ip, gpib_address):
"""
initialize visa instrument resource
:param ip: (str) ip address of Papaya
:param gpib_address: (str) GPIB address of instrument
"""
resource_name = "TCPIP0::%s::inst%s::INSTR" % (ip, gpib_address)
print(resource_name)
rm = visa.ResourceManager()
self.instr = rm.open_resource(resource_name)
self.instr.timeout = 10000
def close(self):
self.instr.close()
def cls(self):
try:
self.instr.write('*CLS')
except ValueError:
print('*CLS fails to clear')
def _set_ESE(self, x):
try:
cmd = '*ESE ' + str(x)
self.instr.write(cmd)
except ValueError:
print ('*ESE write fails')
    def _get_ESE(self):
try:
resp = self.instr.query('*ESE?')
self._output = float(resp)
except ValueError:
print('*ESE query fails')
return self._output
ESE = property(_get_ESE, _set_ESE, "ESE property")
def _set_SRE(self, x):
try:
cmd = '*SRE ' + str(x)
self.instr.write(cmd)
except ValueError:
print ('*SRE write fails')
    def _get_SRE(self):
try:
resp = self.instr.query('*SRE?')
self._output = float(resp)
except ValueError:
print('*SRE query fails')
return self._output
SRE = property(_get_SRE, _set_SRE, "SRE property")
def queryIDN(self):
try:
data = self.instr.query('*IDN?')
return data
except ValueError:
print('*IDN query fails')
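# Hedged usage sketch for the base class; the Papaya IP address and GPIB address below are
# placeholders and must be replaced with the values of your own setup.
def _example_query_idn():
    inst = VisaInstrument('192.168.0.100', '5')
    print(inst.queryIDN())
    inst.close()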
class Keysight_N9030B(VisaInstrument):
def getTrace(self, tra='TRACE1'):
        count = 0
        tmp = ''
try:
self.instr.write('trac:data? %s' %tra)
resp = self.instr.read()
flag = '\n' in resp
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error getting trace')
print(tmp)
traceback.print_exc()
sys.exit(3)
ary = resp.split(',')
dd = np.array([float(c) for c in ary])
return dd
def getTraceXY(self, tra='san1'):
        count = 0
        tmp = ''
try:
self.instr.write('fetch:%s?' %tra)
resp = self.instr.read()
flag = '\n' in resp
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error getting xy trace')
print(tmp)
traceback.print_exc()
sys.exit(3)
ary = resp.split(',')
dd = np.array([float(c) for c in ary])
return dd
class Anritsu_M4647A(VisaInstrument):
def sweepOnce(self):
self.instr.write('TRS;WFS;HLD')
time.sleep(11)
def readSXX(self, fmt='OS11C'):
try:
self.instr.write(fmt) # C here refers to calibrated
resp = self.instr.read()
s = regex.findall(r'^#\d+', resp)[0] # get the first elm in string instead of list
pos = int(s[1]) + 3
_num = int(s[2:len(s)]) # total number of bytes to read
resp = resp[pos:len(resp)] # remove the header
cnt = len(resp)
while cnt < _num:
tmp = self.instr.read()
cnt += len(tmp)
resp += tmp
except visa.VisaIOError:
traceback.print_exc()
sys.exit(3)
# make them into real numbers
y = resp.split('\n')
y = y[0:len(y)-1] # last element is \n
real = np.zeros(len(y), dtype=float)
imag = np.zeros(len(y), dtype=float)
for i_ in range(0, len(y)):
valstr = y[i_].split(',') # split into real and imag
real[i_] = float(valstr[0])
imag[i_] = float(valstr[1])
c = real + 1.j*imag
return c
def freq(self):
try:
self.instr.write(':sens1:freq:data?')
resp = self.instr.read()
s = regex.findall(r'^#\d+', resp)[0] # get the first elm in string instead of list
pos = int(s[1]) + 3
_num = int(s[2:len(s)]) # total number of bytes to read
resp = resp[pos:len(resp)] # remove the header
cnt = len(resp)
while cnt < _num:
tmp = self.instr.read()
cnt += len(tmp)
resp += tmp
except visa.VisaIOError:
traceback.print_exc()
sys.exit(3)
y = resp.split('\n')
y = y[0:len(y)-1] # last element is \n
val = np.array([float(c) for c in y])
return val
class Keithley_2400(VisaInstrument):
def sourcetype(self, type):
if type == 'voltage':
self.instr.write(':SOUR:FUNC VOLT')
self.instr.write(':SENS:FUNC "CURR"')
elif type == 'current':
self.instr.write(':SOUR:FUNC CURR')
self.instr.write(':SENS:FUNC "VOLT"')
def setvoltage(self, vb, curlimit=0.05):
self.instr.write(':SENS:CURR:PROT %f' % curlimit)
self.instr.write(':SOUR:VOLT:LEV %f' % vb)
def querycurrent(self):
try:
self.instr.write(':FORM:ELEM CURR')
cur = self.instr.query('READ?')
c = float(cur)
except ValueError:
print('Keithley 2400 warning: current reading error...')
print(cur)
c = -1000
return float(c)
def setcurrent(self, cur, vlimit=2):
self.instr.write(':SENS:VOLT:PROT %f' % vlimit)
self.instr.write(':SOUR:CURR:LEV %s' % cur)
def _get_output(self):
try:
resp = self.instr.query(':OUTPUT?')
self._output = float(resp)
except ValueError:
print('Keithley 2400 query fails')
return self._output
def _set_output(self, x):
try:
cmd = ':OUTPUT ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Keithley 2400 write fails')
self._output = x
output = property(_get_output, _set_output, "output property")
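# Hedged usage sketch for the Keithley 2400 SMU defined above; the IP and GPIB address are
# placeholders. Sources 1 V with a 10 mA compliance limit and reads back the current.
def _example_keithley_2400_iv_point():
    smu = Keithley_2400('192.168.0.100', '24')
    smu.sourcetype('voltage')
    smu.setvoltage(1.0, curlimit=0.01)
    smu.output = 1
    current = smu.querycurrent()
    smu.output = 0
    smu.close()
    return current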
class Agilent_E3631(VisaInstrument):
def _get_outPutOnOff(self):
try:
resp = self.instr.query(':outp?')
self._outputOnOff = resp
except ValueError:
print('Agilent E3631 query outp fails')
return self._outputOnOff
def _set_outPutOnOff(self, x):
try:
cmd = 'outp ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 write outp fails')
self._outputOnOff = x
outputOnOff = property(_get_outPutOnOff, _set_outPutOnOff, "outputOnOff property")
def queryCurrent(self):
try:
resp=self.instr.query(':meas:curr:dc?')
except ValueError:
print('Agilent E3631 query current fails')
return float(resp)
def queryVoltage(self):
try:
resp=self.instr.query(':meas:volt:dc?')
except ValueError:
print('Agilent E3631 query voltage fails')
return float(resp)
def selectPowerSupply(self, x):
"""
select power supply instrument,
:param x: (int) 1 is P6V, 2 is P25V and 3 is N25V
:return: none
"""
try:
cmd = 'INST:NSEL ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 select power supply fails')
def setP6VSupply(self, x):
try:
# P6V is 1
self.instr.write('INST:NSEL 1')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set P6V fails')
def queryP6VSetVoltage(self):
try:
# P6V is 1
self.instr.write('INST:NSEL 1')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query P6V fails')
return float(val)
def setP25VSupply(self,x):
try:
# P25V is 2
self.instr.write('INST:NSEL 2')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set P25V fails')
def queryP25VSetVoltage(self):
try:
# P25V is 2
self.instr.write('INST:NSEL 2')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query P25V fails')
return float(val)
def setN25VSupply(self, x):
# N25V is 3
try:
self.instr.write('INST:NSEL 3')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set N25V fails')
def queryN25VSetVoltage(self):
# N25V is 3
try:
self.instr.write('INST:NSEL 3')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query N25V fails')
return float(val)
class Keysight_E3649A(VisaInstrument):
def _get_outputOnOff(self):
"""
query output state
:return: 0(OFF) or 1(ON)
"""
try:
resp = self.instr.query('OUTP?')
self._outputOnOff = resp.rstrip()
except ValueError:
print('Agilent E3649A query outp on/off fails')
return self._outputOnOff
def _set_outputOnOff(self, x):
"""
turn output on or off
:param x: either ON or OFF
:return: None
"""
try:
self.instr.write('OUTP ' + str(x))
except ValueError:
print('Agilent E3649A write outp on/off fails')
self._outputOnOff = x
outputOnOff = property(_get_outputOnOff, _set_outputOnOff, "outputOnOff property")
def queryCurrent(self, output_num=None):
"""
query current of selected output
:param output_num: (int) the output to query (None|1|2);
default value None uses the output previously set.
:return: (float) current
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query('MEAS:CURR:DC?')
return float(resp)
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query current fails')
def setCurrent(self, curr, output_num=None):
"""
query current of selected output
:param curr: (float) the desired current level
:param output_num: (int) the output to query (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write('CURR ' + str(curr))
        except (visa.VisaIOError, ValueError):
            print('Agilent E3649A set current fails')
def queryVoltage(self, output_num=None):
"""
query voltage of selected output
:param output_num: (int) the output to read (None|1|2);
default value None uses the output previously set.
:return: (float) voltage
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query('MEAS:VOLT:DC?')
return float(resp)
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query voltage fails')
def setVoltage(self, volt, output_num=None):
"""
set voltage of selected output
:param volt: (float) the desired voltage level
:param output_num: (int) the output to set (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write('VOLT ' + str(volt))
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A set voltage fails')
def selectOutput(self, output_num):
"""
select which output to modify
:param output_num: (int) the output to modify (1|2)
:return: None
"""
try:
self.instr.write('INST:NSEL ' + str(output_num))
except visa.VisaIOError:
print('Agilent E3649A select output fails')
def queryOutputRange(self, output_num=None):
"""
query range setting of selected output
:param output_num: (int) the output to read (None|1|2);
default value None uses the output previously set.
:return: (str) P35V or P60V
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query(':VOLT:RANG?')
return resp.rstrip()
except visa.VisaIOError:
print('Agilent E3649A query output range fails')
def setOutputRange(self, volt_range, output_num=None):
"""
set voltage range of selected output
:param volt_range: the voltage range to set output to (P35V|LOW|P60V|HIGH)
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG ' + str(volt_range))
except visa.VisaIOError:
print('Agilent E3649A set output voltage fails')
def setOutputLow(self, output_num=None):
"""
set voltage range of selected output to 35V
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG LOW')
except visa.VisaIOError:
print('Agilent E3649A set output voltage LOW fails')
def setOutputHigh(self, output_num=None):
"""
set voltage range of output to 60V
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG HIGH')
except visa.VisaIOError:
print('Agilent E3649A set output voltage HIGH fails')
def enableVoltageProtection(self, enable=1, output_num=None):
"""
enable or disable the overvoltage protection function.
:param enable: (0|1|OFF|ON)
:param output_num: output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:PROT:STAT ' + str(enable))
except visa.VisaIOError:
print('Agilent E3649A enable voltage protection fails')
def setVoltageProtection(self, volt, output_num=None):
"""
set the voltage level at which the overvoltage protection
(OVP) circuit will trip.
:param volt: voltage level, 'MIN', or 'MAX'
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:PROT ' + str(volt))
except visa.VisaIOError:
print('Agilent E3649A set output voltage protection fails')
def queryVoltageProtection(self, output_num=None):
"""
query the protection state and voltage level at which the
overvoltage protection (OVP) circuit will trip.
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: tuple (int, str) consisting of enable 0 (OFF) or 1 (ON)
and the voltage trip level.
"""
try:
ena = self.instr.query('VOLT:PROT:STAT?')
level = self.instr.query('VOLT:PROT?')
return ena.rstrip(), level.rstrip()
except visa.VisaIOError:
print('Agilent E3649A query output voltage protection fails')
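# Hedged usage sketch for the E3649A supply defined above; the address values are placeholders.
# Configures output 1 for 12 V on the low (35 V) range with a 15 V overvoltage protection trip.
def _example_e3649a_setup():
    psu = Keysight_E3649A('192.168.0.100', '6')
    psu.selectOutput(1)
    psu.setOutputLow()
    psu.setVoltage(12.0)
    psu.setVoltageProtection(15.0)
    psu.enableVoltageProtection(1)
    psu.outputOnOff = 'ON'
    volts = psu.queryVoltage()
    psu.close()
    return volts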
class Agilent_33401(VisaInstrument):
def acVoltage(self):
try:
self.instr.write(':meas:volt:ac?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query ac volt fails')
def acCurrent(self):
try:
self.instr.write(':meas:curr:ac?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query ac curr fails')
def dcVoltage(self):
try:
self.instr.write(':meas:volt:dc?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query dc volt fails')
def dcCurrent(self):
try:
self.instr.write(':meas:curr:dc?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query dc curr fails')
class Keithley_2510(VisaInstrument):
def querytemp(self):
try:
self.instr.write(':MEAS:TEMP?')
temp = self.instr.read()
t = float(temp)
except ValueError:
print('Keithley 2510 warning: temp read error...')
print(temp)
t = -1000
return float(t)
    def settemp(self, setT=25.0):
self.instr.write(':SOUR:TEMP %f' % setT)
def _get_output(self):
try:
resp = self.instr.query(':OUTPUT?')
self._output = float(resp)
except ValueError:
print('Keithley 2510 query outp fails')
return self._output
def _set_output(self, x):
try:
cmd = ':OUTPUT ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Keithley 2510 write outp fails')
self._output = x
output = property(_get_output, _set_output, "output property")
class Newport_3150(VisaInstrument):
def querytemp(self):
temp = self.instr.query(':TEC:T?')
try:
t = float(temp)
except ValueError:
print('Newport 3150 warning: temp read error...')
print(temp)
t = -1000
return float(t)
    def settemp(self, setT=25.0):
self.instr.write(':TEC:T %f' % setT)
class Agilent_8163(VisaInstrument):
def queryIDN(self):
try:
resp = self.instr.query('*IDN?')
except ValueError:
print('Agilent 8163 fails query')
return resp
def querypower(self):
try:
opt = self.instr.query('READ:POW?')
except ValueError:
print('Agilent 8163 fails query')
return float(opt)
class Keysight_Dca(VisaInstrument):
    def initialize(self):  # initialize for PAM4 measurement
pass
def get_er(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:OER:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
er = self.instr.query(':MEASure:EYE:OER?')
return float(er)
except ValueError:
print('Keysight dca error')
def getOMA(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:OOMA:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
oma = self.instr.query(':MEASure:EYE:OOMA?')
return float(oma)
except ValueError:
print('Keysight dca error')
def getRLM(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:PAM:LINearity:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
rlm = self.instr.query(':MEASure:EYE:PAM:LINearity?')
return float(rlm)
except ValueError:
print('Keysight dca error')
def autoscale(self):
self.instr.write(':SYSTem:AUToscale')
try:
self.instr.query('*OPC?')
except ValueError:
print('Keysight dca error')
def clear(self):
self.instr.write(':ACQuire:CDISplay')
try:
self.instr.query('*OPC?')
except ValueError:
print('Keysight dca error')
def run(self):
self.instr.write(':ACQuire:RUN')
class Agilent_86142(VisaInstrument):
def _get_startWavelength(self):
try:
resp = self.instr.query(':sens:wav:star?')
self._startWavelength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._startWavelength
def _set_startWavelength(self, x):
try:
cmd = ':sens:wav:star ' + str(x)
self.instr.write(cmd)
self._startWavelength = x
except visa.VisaIOError:
print('Agilent 86142 write fails')
startWavelength = property(_get_startWavelength, _set_startWavelength, "startWavelength property")
def _get_stopWavelength(self):
try:
resp = self.instr.query(':sens:wav:stop?')
self._startWavelength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._startWavelength
def _set_stopWavelength(self, x):
try:
cmd = ':sens:wav:stop ' + str(x)
self.instr.write(cmd)
self._stopWavelength = x
except visa.VisaIOError:
print('Agilent 86142 write fails')
stopWavelength = property(_get_stopWavelength, _set_stopWavelength, "stopWavelength property")
def _get_traceLength(self):
try:
resp = self.instr.query(':SENS:SWE:POIN?')
self._traceLength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._traceLength
def _set_traceLength(self, x):
try:
cmd = ':SENS:SWE:POIN ' + str(x)
self.instr.write(cmd)
self._traceLength = x
except ValueError:
print('Agilent 86142 write fails')
traceLength = property(_get_traceLength, _set_traceLength, "traceLength property")
def getTrace(self):
tmp = ''
try:
self.instr.write('form ascii')
self.instr.write('trac? tra')
resp = self.instr.read()
flag = '\n' in resp
count = 0
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error')
print(tmp)
traceback.print_exc()
sys.exit(3)
return resp
def getTrace1(self, pts):
tmp = ''
elmcount = []
count = 0
itr=0
try:
self.instr.write('form ascii')
self.instr.write('trac? tra')
resp = self.instr.read()
count += len(resp.split(','))
while count < pts:
tmp = self.instr.read()
count += len(tmp.split(','))
elmcount.append(count)
resp += tmp
itr += 1
except visa.VisaIOError:
print('error')
print(tmp)
traceback.print_exc()
sys.exit(3)
return resp
def getTraceBin(self):
try:
self.instr.write('form real32')
self.instr.write('trac? tra')
resp = self.instr.read()
return resp
except ValueError:
print('Agilent 86142 write fails')
class JDSU_HA9(VisaInstrument):
_attenuation = 0
_beamIsBlocked = 0
def _get_attenuation(self):
try:
resp = self.instr.query('att?')
self._attenuation = float(resp)
except ValueError:
print('JDSU HA9 query fails')
return self._attenuation
def _set_attenuation(self, x):
try:
cmd = 'att ' + str(x)
self.instr.write(cmd)
self._attenuation = x
except ValueError:
print('JDSU HA9 write fails')
attenuation = property(_get_attenuation, _set_attenuation, "attenuation property")
def _get_beamIsBlocked(self):
try:
resp = self.instr.query('D?')
self._beamIsBlocked = int(resp)
except ValueError:
print('JDSU HA9 query fails')
return self._beamIsBlocked
def _set_beamIsBlocked(self, x):
try:
cmd = 'D ' + str(int(x))
self.instr.write(cmd)
self._beamIsBlocked = int(x)
except ValueError:
print('JDSU HA9 write fails')
beamIsBlocked = property(_get_beamIsBlocked, _set_beamIsBlocked, "beamIsBlock property")
class N9020A_SpectrumAnalyzer(VisaInstrument):
_inputCoupling = 'DC' # default
_bandwidthResolution_MHz = 0.5
_bandwidthVideo_MHz = 10
_sweepPoints = 1001
_startFreqMHz = 10e-3
_stopFreqMHz = 1350
_traceAve = 1
_contSweep = 0
def _set_contSweep(self, x=1):
try:
cmd = ':INIT:CONT ' + str(x)
self.instr.write(cmd)
self._contSweep = str(x)
except ValueError:
print('N9020A fails to set cont sweep config')
def _get_contSweep(self):
try:
resp = self.instr.query(':INIT:CONT?')
self._contSweep=resp
except ValueError:
print('N9020A fails to get cont sweep config')
return self._contSweep
contSweep = property(_get_contSweep, _set_contSweep, 'input coupling property')
def _set_inputCoupling(self, x='DC'):
try:
cmd = 'INPut:COUPling ' + str(x)
self.instr.write(cmd)
self._inputCoupling = str(x)
except ValueError:
print('N9020A fails to set input coupling')
def _get_inputCoupling(self):
try:
resp = self.instr.query('INP:COUP?')
self._inputCoupling = resp
except ValueError:
print('N9020A fails to get input coupling')
return self._inputCoupling
inputCoupling = property(_get_inputCoupling, _set_inputCoupling, 'input coupling property')
def _set_bandwidthResolution_MHz(self,x=0.5):
try:
cmd = 'BANDWIDTH:RESOLUTION ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._bandwidthResolution_MHz = float(x)
except ValueError:
print('N9020A fails to set bandwidth resolution')
def _get_bandwidthResolution_MHz(self):
try:
resp = self.instr.query('BANDWIDTH:RESOLUTION?')
self._bandwidthResolution_MHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get bandwidth resolution')
return self._bandwidthResolution_MHz
resolutionBW_MHz = property(_get_bandwidthResolution_MHz, _set_bandwidthResolution_MHz, 'bandwidth resolution property')
def _set_bandwidthVideo_MHz(self, x=0.5):
try:
cmd = 'BANDWIDTH:VIDEO ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._bandwidthResolution_MHz = float(x)
except ValueError:
print('N9020A fails to set video bandwidth')
def _get_bandwidthVideo_MHz(self):
try:
resp = self.instr.query('BANDWIDTH:VIDEO?')
self._bandwidthResolution_MHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get video bandwidth')
return self._bandwidthResolution_MHz
videoBW_MHz = property(_get_bandwidthVideo_MHz, _set_bandwidthVideo_MHz, 'video bandwidth property')
def _set_sweepPoints(self,x=1001):
try:
cmd = 'SWEEP:POINTS ' + str(x)
self.instr.write(cmd)
self._sweepPoints = int(x)
except ValueError:
print('N9020A fails to set sweep points')
def _get_sweepPoints(self):
try:
resp = self.instr.query('SWEEP:POINTS?')
self._sweepPoints = int(resp) # in MHz
except ValueError:
print('N9020A fails to get sweep points')
return self._sweepPoints
sweepPoints = property(_get_sweepPoints, _set_sweepPoints, 'sweep points')
def _set_startFreqMHz(self,x=10e-3):
try:
cmd = 'FREQUENCY:START ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._startFreqMHz = float(x)
except ValueError:
print('N9020A fails to set start frequency')
def _get_startFreqMHz(self):
try:
resp = self.instr.query('FREQUENCY:START?')
self._startFreqMHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get stop frequency')
return self._startFreqMHz
startFreqMHz = property(_get_startFreqMHz, _set_startFreqMHz,'start frequency property')
def _set_stopFreqMHz(self, x=13.5e3):
try:
cmd = 'FREQUENCY:STOP ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._stopFreqMHz = float(x)
except ValueError:
print('N9020A fails to set start frequency')
def _get_stopFreqMHz(self):
try:
resp = self.instr.query('FREQUENCY:STOP?')
self._stopFreqMHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get stop frequency')
return self._stopFreqMHz
stopFreqMHz = property(_get_stopFreqMHz, _set_stopFreqMHz, 'start frequency property')
def _set_traceAve(self, x=1):
try:
if x >= 1:
cmd = 'ACP:AVER:COUN ' + str(x)
self.instr.write(cmd)
if x == 0:
self.instr.write('ACPower:AVERage OFF')
self._traceAve = int(x)
except ValueError:
print('N9020A fails to set trace average')
def _get_traceAve(self):
try:
resp = self.instr.query('ACPower:AVERage:COUNt?')
self._traceAve = int(resp)
except ValueError:
print('N9020A fails to get stop frequency')
return self._traceAve
traceAve = property(_get_traceAve, _set_traceAve, 'trace average')
def getTrace(self):
_points = self._get_sweepPoints()
_stopf = self._get_stopFreqMHz()
_startf = self._get_startFreqMHz()
_freq = np.linspace(_startf, _stopf, _points)
tmp = ''
try:
self.instr.write('FORMAT:TRACE:DATA ASCII')
self.instr.write('TRAC? TRACE1')
resp = self.instr.read()
flag = '\n' in resp
count = 0
while not flag:
tmp = self.instr.read()
resp += (tmp)
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('N9020A get trace error')
print(tmp)
resp = tmp
traceback.print_exc()
sys.exit(3)
resp = resp.split(',')
y = [float(d) for d in resp]
y = np.array(y)
return _freq, y
def setMarkerPos(self,pos=0):
_points = self._get_sweepPoints()
cmd = 'calc:mark1:X:pos:cent ' + str(pos)
try:
if pos < _points:
self.instr.write(cmd)
except visa.VisaIOError:
print('N9020A write error: ' + cmd)
def getMarkerNoise(self, pos=0):
# cmd = 'CALC:MARK:FUNCNOIS'
try:
# self.instr.write(cmd)
self.setMarkerPos(pos)
val = self.instr.query('CALC:MARK:Y?')
return float(val)
except visa.VisaIOError:
print('N9020A getMarkerNoise error')
def getMarkerNoiceTrace(self):
_points = self._get_sweepPoints()
_stopf = self._get_stopFreqMHz()
_startf = self._get_startFreqMHz()
        _freq = np.linspace(_startf, _stopf, _points)
# -*- coding: utf-8 -*-
"""
@brief test log(time=21s)
"""
import unittest
import warnings
from logging import getLogger
from typing import Any
import numpy
from sklearn.preprocessing import FunctionTransformer
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from mlprodict.onnx_conv import register_rewritten_operators, to_onnx
from mlprodict.onnxrt import OnnxInference
from mlprodict.npy import onnxnumpy_default, onnxnumpy_np, NDArray
import mlprodict.npy.numpy_onnx_impl as nxnp
def custom_fft_abs_py(x):
"onnx fft + abs python"
# see https://jakevdp.github.io/blog/
# 2013/08/28/understanding-the-fft/
dim = x.shape[1]
n = numpy.arange(dim)
k = n.reshape((-1, 1)).astype(numpy.float64)
kn = k * n * (-numpy.pi * 2 / dim)
kn_cos = numpy.cos(kn)
kn_sin = numpy.sin(kn)
ekn = numpy.empty((2,) + kn.shape, dtype=x.dtype)
ekn[0, :, :] = kn_cos
ekn[1, :, :] = kn_sin
res = numpy.dot(ekn, x.T)
tr = res ** 2
mod = tr[0, :, :] + tr[1, :, :]
return numpy.sqrt(mod).T
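# A hedged sanity-check sketch: the handwritten DFT above should match the magnitude of
# numpy's FFT row by row for random float64 input.
def _check_custom_fft_abs_py():
    rnd = numpy.random.randn(3, 16)
    expected = numpy.abs(numpy.fft.fft(rnd, axis=1))
    assert numpy.allclose(expected, custom_fft_abs_py(rnd))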
def _custom_fft_abs(x):
dim = x.shape[1]
n = nxnp.arange(0, dim).astype(numpy.float32)
k = n.reshape((-1, 1))
    kn = (k * (n * numpy.float32(-numpy.pi * 2))) / dim.astype(numpy.float32)
import copy
import operator
import numpy as np
import numpy.random as npr
from matplotlib import pyplot as plt
from mimo.distributions import Dirichlet
from mimo.distributions import CategoricalWithDirichlet
from mimo.distributions import NormalWishart
from mimo.distributions import GaussianWithNormalWishart
from mimo.mixtures import BayesianMixtureOfGaussians
npr.seed(1337)
nb_samples = 2500
data = np.zeros((nb_samples, 2))
step = 14. * np.pi / nb_samples
for i in range(data.shape[0]):
x = i * step - 6.
data[i, 0] = x + npr.normal(0, 0.1)
    data[i, 1] = 3. * (np.sin(x) + npr.normal(0, 0.1))
"""The main lagrangian-filtering module.
This module contains the crucial datastructure for
lagrangian-filtering, `LagrangeFilter`. See project documentation
for examples on how to construct a filtering workflow using this
library.
"""
import dask.array as da
import numpy as np
from datetime import timedelta, datetime
from glob import iglob
import parcels
from scipy import signal
import netCDF4
import xarray as xr
import netCDF4 as nc
import sys
from .file import LagrangeParticleFile
import multiprocessing as mp
from functools import partial
import pathos.pools as pp
class LagrangeFilter(object):
"""The main class for a Lagrangian filtering workflow.
The workflow is set up using the input files and the filtering
parameters. Filtering can be performed all at once, or on
individual time levels.
Data must contain horizontal velocity components `U` and `V` to
perform the Lagrangian frame transformation. Any variables that should
be filtered must be specified in the `sample_variables` list (this
includes velocity).
Note:
We use the OceanParcels convention for variable names. This means that
``U``, ``V``, ``lon``, ``lat``, ``time`` and ``depth`` are canonical
names for properties required for particle advection. The mapping from
the actual variable name in your data files to these canonical names
is contained in the `variables` and `dimensions` dictionaries. When
specifying `filenames` or `sample_variables`, the canonical names
must be used, however any other variables may use whatever name you
would like.
Once the `LagrangeFilter` has been constructed, you may call it as
a function to perform the filtering workflow. See :func:`~filter`
for documentation.
Example:
A straightforward filtering workflow::
f = LagrangeFilter(
name, filenames, variables, dimensions, sample_variables,
)
f()
Would result in a new file with the given `name` and an appropriate
extension containing the filtered data for each of the `sample_variables`.
Args:
name (str): The name of the workflow
filenames (Dict[str, str]): A mapping from data variable names
to the files containing the data.
Filenames can contain globs if the data is spread across
multiple files.
variables_or_data (Union[Dict[str, str], xarray.Dataset]): Either
a mapping from canonical variable names to the variable
names in your data files, or an xarray Dataset containing
the input data.
dimensions (Dict[str, str]): A mapping from canonical dimension
names to the dimension names in your data files.
sample_variables ([str]): A list of variable names that should be sampled
into the Lagrangian frame of reference and filtered.
mesh (:obj:`str`, optional): The OceanParcels mesh type, either "flat"
or "spherical". "flat" meshes are expected to have dimensions
in metres, and "spherical" meshes in degrees.
c_grid (:obj:`bool`, optional): Whether to interpolate velocity
components on an Arakawa C grid (defaults to no).
indices (:obj:`Dict[str, [int]]`, optional): An optional dictionary
specifying the indices to which a certain dimension should
be restricted.
uneven_window (:obj:`bool`, optional): Whether to allow different
lengths for the forward and backward advection phases.
window_size (:obj:`float`, optional): The nominal length of the both
the forward and backward advection windows, in seconds. A
longer window may better capture the low-frequency signal to be
removed.
highpass_frequency (:obj:`float`, optional): The 3dB cutoff frequency
for filtering, below which spectral components will be
attenuated. This should be an angular frequency, in [rad/s].
advection_dt (:obj:`datetime.timedelta`, optional): The timestep
to use for advection. May need to be adjusted depending on the
resolution/frequency of your data.
"""
def __init__(
self,
name,
filenames_or_dataset,
variables,
dimensions,
sample_variables,
mesh="flat",
c_grid=False,
indices={},
uneven_window=False,
window_size=None,
highpass_frequency=5e-5,
advection_dt=timedelta(minutes=5),
):
# The name of this filter
self.name = name
# Width of window over which our filter computes a meaningful result
# in seconds. Default to 3.5 days on either side
if window_size is None:
self.window_size = timedelta(days=3.5).total_seconds()
else:
self.window_size = window_size
# Whether we're permitted to use uneven windows on either side
self.uneven_window = uneven_window
# copy input file dictionaries so we can construct the output file
# filenames dictionary is modified to expand globs when
# the fieldset is constructed
self._filenames = filenames_or_dataset
self._variables = variables
self._dimensions = dimensions
self._indices = indices
# sample variables without the "var_" prefix
self._sample_variables = sample_variables
# choose the fieldset constructor depending on the format
# of the input data
if isinstance(filenames_or_dataset, xr.Dataset):
fieldset_constructor = parcels.FieldSet.from_xarray_dataset
else:
fieldset_constructor = parcels.FieldSet.from_netcdf
# for C-grid data, we have to change the interpolation method
fieldset_kwargs = {}
if c_grid:
interp_method = {}
for v in variables:
if v in ["U", "V", "W"]:
interp_method[v] = "cgrid_velocity"
else:
interp_method[v] = "cgrid_tracer"
fieldset_kwargs["interp_method"] = interp_method
# construct the OceanParcels FieldSet to use for particle advection
self.fieldset = fieldset_constructor(
filenames_or_dataset,
variables,
dimensions,
indices=indices,
mesh=mesh,
**fieldset_kwargs,
)
# save the lon/lat on which to seed particles
# this is saved here because if the grid is later made periodic, the
# underlying grids will be modified, and we'll seed particles in the halos
if self.fieldset.gridset.grids[0].gtype in [
parcels.GridCode.CurvilinearZGrid,
parcels.GridCode.CurvilinearSGrid,
]:
self._curvilinear = True
self._grid_lon = self.fieldset.gridset.grids[0].lon
self._grid_lat = self.fieldset.gridset.grids[0].lat
else:
self._curvilinear = False
self._grid_lon, self._grid_lat = np.meshgrid(
self.fieldset.gridset.grids[0].lon, self.fieldset.gridset.grids[0].lat
)
# starts off non-periodic
self._is_zonally_periodic = False
self._is_meridionally_periodic = False
# guess the output timestep
times = self.fieldset.gridset.grids[0].time
self.output_dt = times[1] - times[0]
print('timestep =',self.output_dt,'seconds')
# create the filter - use a 4th order Butterworth for the moment
# make sure to convert angular frequency back to linear for passing to the
# filter constructor
fs = 1.0 / self.output_dt
self.inertial_filter = signal.butter(
4, highpass_frequency / (2 * np.pi), "lowpass", fs=fs
)
# timestep for advection
self.advection_dt = advection_dt
# the sample variable attribute has 'var_' prepended to map to
# variables on particles
self.sample_variables = ["var_" + v for v in sample_variables]
# create the particle class and kernel for sampling
# map sampled variables to fields
self.particleclass = ParticleFactory(sample_variables)
self._create_sample_kernel(sample_variables)
self.kernel = parcels.AdvectionRK4 + self.sample_kernel
# compile kernels
self._compile(self.sample_kernel)
self._compile(self.kernel)
def _create_sample_kernel(self, sample_variables):
"""Create the parcels kernel for sampling fields during advection."""
# make sure the fieldset has C code names assigned, etc.
self.fieldset.check_complete()
# string for the kernel itself
f_str = "def sample_kernel(particle, fieldset, time):\n"
for v in sample_variables:
f_str += f"\tparticle.var_{v} = fieldset.{v}[time, particle.depth, particle.lat, particle.lon]\n"
else:
f_str += "\tpass"
# create the kernel
self.sample_kernel = parcels.Kernel(
self.fieldset,
self.particleclass.getPType(),
funcname="sample_kernel",
funcvars=["particle", "fieldset", "time"],
funccode=f_str,
)
def _compile(self, kernel):
"""Compile a kernel and tell it to load the resulting shared library."""
kernel.compile(compiler=parcels.compiler.GNUCompiler())
kernel.load_lib()
def make_zonally_periodic(self, width=None):
"""Mark the domain as zonally periodic.
This will add a halo to the eastern and western edges of the
domain, so that they may cross over during advection without
being marked out of bounds. If a particle ends up within the
halo after advection, it is reset to the valid portion of the
domain.
If the domain has already been marked as zonally periodic,
nothing happens.
Due to the method of resetting particles that end up in the
halo, this is incompatible with curvilinear grids.
Args:
width (:obj:`int`, optional): The width of the halo,
defaults to 5 (per parcels). This needs to be less
than half the number of points in the grid in the x
direction. This may need to be adjusted for small
domains, or if particles are still escaping the halo.
Note:
This causes the kernel to be recompiled to add another stage
which resets particles that end up in the halo to the main
domain.
If the kernel has already been recompiled for meridional periodicity,
it is again reset to include periodicity in both
directions.
"""
# the method of resetting particles won't work on a curvilinear grid
if self._curvilinear:
raise Exception("curvilinear grids can not be periodic")
# make sure we can't do this twice
if self._is_zonally_periodic:
return
# add constants that are accessible within the kernel denoting the
# edges of the halo region
self.fieldset.add_constant("halo_west", self.fieldset.gridset.grids[0].lon[0])
self.fieldset.add_constant("halo_east", self.fieldset.gridset.grids[0].lon[-1])
if width is None:
self.fieldset.add_periodic_halo(zonal=True)
else:
self.fieldset.add_periodic_halo(zonal=True, halosize=width)
# unload the advection-only kernel, and add the periodic-reset kernel
self.kernel.remove_lib()
if self._is_meridionally_periodic:
k = _doubly_periodic_BC
else:
k = _zonally_periodic_BC
periodic_kernel = parcels.Kernel(
self.fieldset, self.particleclass.getPType(), k
)
self.kernel = parcels.AdvectionRK4 + periodic_kernel + self.sample_kernel
self._compile(self.kernel)
self._is_zonally_periodic = True
def make_meridionally_periodic(self, width=None):
"""Mark the domain as meridionally periodic.
This will add a halo to the northern and southern edges of the
domain, so that they may cross over during advection without
being marked out of bounds. If a particle ends up within the
halo after advection, it is reset to the valid portion of the
domain.
If the domain has already been marked as meridionally periodic,
nothing happens.
Due to the method of resetting particles that end up in the
halo, this is incompatible with curvilinear grids.
Args:
width (:obj:`int`, optional): The width of the halo,
defaults to 5 (per parcels). This needs to be less
than half the number of points in the grid in the y
direction. This may need to be adjusted for small
domains, or if particles are still escaping the halo.
Note:
This causes the kernel to be recompiled to add another stage
which resets particles that end up in the halo to the main
domain.
If the kernel has already been recompiled for zonal periodicity,
it is again reset to include periodicity in both
directions.
"""
# the method of resetting particles won't work on a curvilinear grid
if self._curvilinear:
raise Exception("curvilinear grids can not be periodic")
# make sure we can't do this twice
if self._is_meridionally_periodic:
return
# add constants that are accessible within the kernel denoting the
# edges of the halo region
self.fieldset.add_constant("halo_north", self.fieldset.gridset.grids[0].lat[-1])
self.fieldset.add_constant("halo_south", self.fieldset.gridset.grids[0].lat[0])
if width is None:
self.fieldset.add_periodic_halo(meridional=True)
else:
self.fieldset.add_periodic_halo(meridional=True, halosize=width)
# unload the previous kernel, and add the meridionally-periodic kernel
self.kernel.remove_lib()
if self._is_zonally_periodic:
k = _doubly_periodic_BC
else:
k = _meridionally_periodic_BC
periodic_kernel = parcels.Kernel(
self.fieldset, self.particleclass.getPType(), k
)
self.kernel = parcels.AdvectionRK4 + periodic_kernel + self.sample_kernel
self._compile(self.kernel)
self._is_meridionally_periodic = True
def particleset(self, time):
"""Create a ParticleSet initialised at the given time.
Args:
time (float): The origin time for forward and backward advection
on this ParticleSet.
Returns:
parcels.ParticleSet: A new ParticleSet containing a single particle
at every gridpoint, initialised at the specified time.
"""
# reset the global particle ID counter so we can rely on particle IDs making sense
parcels.particle.lastID = 0
return parcels.ParticleSet(
self.fieldset,
pclass=self.particleclass,
lon=self._grid_lon,
lat=self._grid_lat,
time=time,
)
def advection_step(self, time, output_time=False):
"""Perform forward-backward advection at a single point in time.
This routine is responsible for creating a new ParticleSet at
the given time, and performing the forward and backward
advection steps in the Lagrangian transformation.
Args:
time (float): The point in time at which to calculate filtered data.
output_time (:obj:`bool`, optional): Whether to include "time" as
a numpy array in the output dictionary, for doing manual analysis.
Note:
If ``output_time`` is True, the output object will not be compatible
with the default filtering workflow, :func:`~filter_step`!
Returns:
Dict[str, (int, dask.array)]: A dictionary of the advection
data, mapping variable names to a pair. The first element is
the index of the sampled timestep in the data, and the
second element is a lazy dask array concatenating the forward
and backward advection data.
"""
# seed all particles at gridpoints
ps = self.particleset(time)
# execute the sample-only kernel to efficiently grab the initial condition
ps.kernel = self.sample_kernel
ps.execute(self.sample_kernel, runtime=0, dt=self.advection_dt)
# set up the temporary output file for the initial condition and
# forward advection
outfile = LagrangeParticleFile(ps, self.output_dt, self.sample_variables)
# now the forward advection kernel can run
outfile.set_group("forward")
ps.kernel = self.kernel
ps.execute(
self.kernel,
runtime=self.window_size,
dt=self.advection_dt,
output_file=outfile,
)
# reseed particles back on the grid, then advect backwards
# we don't need any initial condition sampling since we've already done it
outfile.set_group("backward")
ps = self.particleset(time)
ps.kernel = self.kernel
ps.execute(
self.kernel,
runtime=self.window_size,
dt=-self.advection_dt,
output_file=outfile,
)
# stitch together and filter all sample variables from the temporary
# output data
da_out = {}
for v in self.sample_variables:
# load data lazily as dask arrays, for forward and backward segments
var_array_forward = da.from_array(
outfile.data("forward")[v], chunks=(None, "auto")
)[:-1, :]
var_array_backward = da.from_array(
outfile.data("backward")[v], chunks=(None, "auto")
)[:-1, :]
# get an index into the middle of the array
time_index_data = var_array_backward.shape[0] - 1
# construct proper sequence by concatenating data and flipping the backward segment
# for var_array_forward, skip the initial output for both the sample-only and
# sample-advection kernels, which have meaningless data
var_array = da.concatenate(
(da.flip(var_array_backward[1:, :], axis=0), var_array_forward)
)
da_out[v] = (time_index_data, var_array)
if output_time:
da_out["time"] = np.concatenate(
(
outfile.data("backward").attrs["time"][1:-1][::-1],
outfile.data("forward").attrs["time"][:-1],
)
)
return da_out
def filter_step(self, advection_data):
"""Perform filtering of a single step of advection data.
The Lagrangian-transformed data from :func:`~advection_step` is
high-pass filtered in time, leaving only the signal at the
origin point (i.e. the filtered forward and backward advection
data is discarded).
Args:
advection_data (Dict[str, (int, dask.array)]): A dictionary of
particle advection data from a single timestep, returned
from :func:`~advection_step`.
Returns:
Dict[str, dask.array]: A dictionary mapping sampled
variable names to a 1D dask array containing the
filtered data at the specified time. This data is not
lazy, as it has already been computed out of the
temporary advection data.
"""
da_out = {}
for v, a in advection_data.items():
time_index_data, var_array = a
def filter_select(x):
return signal.filtfilt(*self.inertial_filter, x)[..., time_index_data]
# apply scipy filter as a ufunc
# mapping an array to scalar over the first axis, automatically vectorize execution
# and allow rechunking (since we have a chunk boundary across the first axis)
filtered = da.apply_gufunc(
filter_select,
"(i)->()",
var_array,
axis=0,
output_dtypes=var_array.dtype,
allow_rechunk=True,
)
da_out[v] = filtered.compute()
return da_out
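# A minimal sketch of how advection_step and filter_step chain together for a single
# timestep (the names `f` and `t` below are placeholders, not defined in this file):
#
#     adv = f.advection_step(t)        # Lagrangian transform: {var: (index, dask array)}
#     filtered = f.filter_step(adv)    # {var: filtered values at the seeded gridpoints}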
def filter(self, *args, **kwargs):
"""Run the filtering process on this experiment.
Note:
Instead of `f.filter(...)`, you can call `f(...)` directly.
This is the main method of the filtering workflow. The timesteps
to filter may either be specified manually, or determined from
the window size and the timesteps within the input files. In
this latter case, only timesteps that have the full window
size on either side are selected.
Note:
If `absolute` is True, the times must be the same datatype
as those of the input data. For dates with a calendar, this
is likely :obj:`np.datetime64` or :obj:`cftime.datetime`.
For abstract times, this may simply be a number.
Args:
times (:obj:`[float]`, optional): A list of timesteps at
which to run the filtering. If this is omitted, all
timesteps that are fully covered by the filtering
window are selected.
clobber (:obj:`bool`, optional): Whether to overwrite any
existing output file with the same name as this
experiment. Default behaviour will not clobber an
existing output file.
absolute (:obj:`bool`, optional): If `times` is provided,
this argument determines whether to interpret them
as relative to the first timestep in the input dataset
(False, default), or as absolute, following the actual
time dimension in the dataset (True).
"""
self(*args, **kwargs)
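# Example of the top-level workflow (a sketch: the class name `LagrangeFilter` and the
# constructor arguments are assumptions about code outside this excerpt):
#
#     f = LagrangeFilter(
#         "my_experiment", filenames, variables, dimensions,
#         sample_variables=["U", "V"], window_size=3 * 86400,
#     )
#     f.make_zonally_periodic()
#     f.filter()   # equivalent to f(); writes the filtered fields to a netCDF file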
def create_out(self, clobber=False, date=None):
"""Create a netCDF dataset to hold filtered output.
Here we create a new ``netCDF4.Dataset`` for filtered
output. For each sampled variable in the input files, a
corresponding variable is created in the output file, with
the same dimensions.
Returns:
netCDF4.Dataset: A single dataset that will hold all
filtered output.
"""
# the output dataset we're creating
filename = self.name
if date is not None:
date_str = str(date)[:13]
filename += '_' + date_str
filename += ".nc"
print(filename)
ds = netCDF4.Dataset(filename, "w", clobber=clobber)
# helper function to create the dimensions in the output file
def create_dimension(dims, dim, var):
# translate from parcels -> file convention
# and check whether we've already created this dimension
# (e.g. for a previous variable)
file_dim = dims[dim]
if file_dim in ds.variables:
return ds.variables[file_dim].dimensions[
1
if len(ds.variables[file_dim].dimensions) > 1 and dim == "lon"
else 0
]
# get the file containing the dimension data
v_orig = self._variables.get(var, var)
if isinstance(self._filenames, xr.Dataset):
ds_orig = self._filenames[file_dim]
else:
if isinstance(self._filenames[var], dict):
filename = self._filenames[var][dim]
else:
filename = self._filenames[var]
if isinstance(filename, list):
filename = filename[0]
ds_orig = xr.open_dataset(next(iglob(filename)))[file_dim]
# create dimensions if needed
for d in ds_orig.dims:
if d not in ds.dimensions:
ds.createDimension(d, ds_orig[d].size)
# create the dimension variable
ds.createVariable(file_dim, ds_orig.dtype, dimensions=ds_orig.dims)
ds.variables[file_dim][:] = ds_orig
# curvilinear grid case
return ds_orig.dims[1 if len(ds_orig.dims) > 1 and dim == "lon" else 0]
# create a time dimension if dimensions are uniform across all variables
if "time" in self._dimensions:
dim_time = self._dimensions["time"]
ds.createDimension(dim_time)
# Add by FlG
if date is not None:
ds.createVariable(
"time",
"float32",
dimensions=(dim_time,),
)
calendar = 'standard'
units = 'seconds since 1900-01-01 00:00'
ts = (date - np.datetime64('1970-01-01T00:00:00Z'))
# -*- coding: utf-8 -*-
import numpy as np
def multi_vertical_crop(img, pieces=3):
"""按列对图片进行切割。首先找出目标片段所在的列,这个需要按需调整筛选条件。
然后,筛选出边界点,即cutpoints。因为边界点的下一点即是新的片段的起始点,
所以有了pieces个边界点后,就可以进行切割了。
Args:
img: PIL.Image object
pieces: number of pieces
Returns:
list of PIL.Image object
"""
width, height = img.size
data = np.array(img)
# keep columns that contain at least one dark pixel (value < 140)
points = [i for i in range(width) if np.sum(data[:, i] < 140) > 0]
# find the boundary points (where consecutive column indices are not adjacent)
cutpoints = [i for i in range(len(points)-1) if points[i]+1 != points[i+1]]
if len(cutpoints) != pieces:
print("image has something unfit")
return None
i, j, k = cutpoints
# the point after each boundary is the start of a new segment
cutpoints = ((points[0], points[i]), (points[i+1], points[j]),
(points[j+1], points[k]), (points[k+1], points[-1]))
imagelist = []
for start, end in cutpoints:
# end+1 because crop() excludes the right edge; +1 includes it
imagelist.append(img.crop((start, 0, end+1, height)))
return imagelist
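# Example usage (a sketch; the file name is an assumption and the image is expected to
# be grayscale so that the <140 threshold is meaningful):
#   from PIL import Image
#   img = Image.open("captcha.png").convert("L")
#   segments = multi_vertical_crop(img, pieces=3)   # list of 4 crops, or None on failure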
def horizon_crop(img):
"""按行切割,规则按需设置"""
width, height = img.size
data = np.array(img)
# rule used here: keep rows containing at least one dark pixel (value < 140)
points = [i for i in range(height) if np.sum(data[i, :] < 140) >= 1]
start, end = points[0], points[-1]
# +1 to keep the last row
img_ = img.crop((0, start, width, end+1))
return img_
def vertical_crop(img):
"""按列剪切,简版,详细文档见multi_vertical_crop"""
width, height = img.size
data = np.array(img)
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Linear Gaussian State Space Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.linear_gaussian_ssm import _augment_sample_shape
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_cov_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_filter_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_mean_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import kalman_transition
from tensorflow_probability.python.distributions.linear_gaussian_ssm import KalmanFilterState
from tensorflow_probability.python.distributions.linear_gaussian_ssm import linear_gaussian_update
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
tfl = tf.linalg
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class IIDNormalTest(test.TestCase):
def setUp(self):
pass
def _build_iid_normal_model(self,
num_timesteps,
latent_size,
observation_size,
transition_variance,
obs_variance):
"""Build a model whose outputs are IID normal by construction."""
# Use orthogonal matrices to project a (potentially
# high-dimensional) latent space of IID normal variables into a
# low-dimensional observation that is still IID normal.
random_orthogonal_matrix = lambda: np.linalg.qr(
np.random.randn(latent_size, latent_size))[0][:observation_size, :]
obs_matrix = tf.convert_to_tensor(random_orthogonal_matrix(),
dtype=tf.float32)
model = tfd.LinearGaussianStateSpaceModel(
num_timesteps=num_timesteps,
transition_matrix=tf.zeros((latent_size, latent_size)),
transition_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(transition_variance)*tf.ones((latent_size))),
observation_matrix=obs_matrix,
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(obs_variance)*tf.ones((observation_size))),
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(transition_variance)*tf.ones((latent_size))),
validate_args=True)
return model
def test_iid_normal_sample(self):
num_timesteps = 10
latent_size = 3
observation_size = 2
num_samples = 10000
for transition_variance_val in [.3, 100.]:
for obs_variance_val in [.6, 40.]:
iid_latents = self._build_iid_normal_model(
num_timesteps=num_timesteps,
latent_size=latent_size,
observation_size=observation_size,
transition_variance=transition_variance_val,
obs_variance=obs_variance_val)
x = iid_latents.sample(num_samples)
x_val = self.evaluate(x)
result_shape = [num_timesteps, observation_size]
marginal_variance = transition_variance_val + obs_variance_val
stderr_mean = np.sqrt(num_samples * marginal_variance)
stderr_variance = marginal_variance * np.sqrt(2./(num_samples-1))
self.assertAllClose(np.mean(x_val, axis=0),
np.zeros(result_shape),
atol=5*stderr_mean)
self.assertAllClose(np.var(x_val, axis=0),
np.ones(result_shape) * marginal_variance,
rtol=5*stderr_variance)
def test_iid_normal_logprob(self):
# In the case where the latent states are iid normal (achieved by
# setting the transition matrix to zero, so there's no dependence
# between timesteps), and observations are also independent
# (achieved by using an orthogonal matrix as the observation model),
# we can verify log_prob as a simple iid Gaussian log density.
delta = 1e-4
for transition_variance_val in [1., 1e-8]:
for obs_variance_val in [1., 1e-8]:
iid_latents = self._build_iid_normal_model(
num_timesteps=10,
latent_size=4,
observation_size=2,
transition_variance=transition_variance_val,
obs_variance=obs_variance_val)
x = iid_latents.sample([5, 3])
lp_kalman = iid_latents.log_prob(x)
marginal_variance = transition_variance_val + obs_variance_val
lp_iid = tf.reduce_sum(
tfd.Normal(0., tf.sqrt(marginal_variance)).log_prob(x),
axis=(-2, -1))
lp_kalman_val, lp_iid_val = self.evaluate((lp_kalman, lp_iid))
self.assertAllClose(lp_kalman_val,
lp_iid_val,
rtol=delta, atol=0.)
@test_util.run_all_in_graph_and_eager_modes
class BatchTest(test.TestCase):
"""Test that methods broadcast batch dimensions for each parameter."""
def setUp(self):
pass
def _build_random_model(self,
num_timesteps,
latent_size,
observation_size,
prior_batch_shape=None,
transition_matrix_batch_shape=None,
transition_noise_batch_shape=None,
observation_matrix_batch_shape=None,
observation_noise_batch_shape=None):
"""Builds a LGSSM with random normal ops of specified shape."""
prior_batch_shape = (
[] if prior_batch_shape is None else prior_batch_shape)
transition_matrix_batch_shape = ([] if transition_matrix_batch_shape is None
else transition_matrix_batch_shape)
transition_noise_batch_shape = ([] if transition_noise_batch_shape is None
else transition_noise_batch_shape)
observation_matrix_batch_shape = ([]
if observation_matrix_batch_shape is None
else observation_matrix_batch_shape)
observation_noise_batch_shape = ([] if observation_noise_batch_shape is None
else observation_noise_batch_shape)
return tfd.LinearGaussianStateSpaceModel(
num_timesteps=num_timesteps,
transition_matrix=tf.random_normal(
transition_matrix_batch_shape + [latent_size, latent_size]),
transition_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.nn.softplus(tf.random_normal(
transition_noise_batch_shape + [latent_size]))),
observation_matrix=tf.random_normal(
observation_matrix_batch_shape + [observation_size, latent_size]),
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.nn.softplus(tf.random_normal(
observation_noise_batch_shape + [observation_size]))),
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.nn.softplus(tf.random_normal(
prior_batch_shape + [latent_size]))),
validate_args=True)
def _sanity_check_shapes(self, model,
batch_shape,
event_shape,
sample_shape=(2, 1)):
# Lists can't be default arguments, but we'll want sample_shape to
# be a list so we can concatenate with other shapes passed as
# lists.
sample_shape = list(sample_shape)
self.assertEqual(model.event_shape.as_list(), event_shape)
self.assertEqual(model.batch_shape.as_list(), batch_shape)
y = model.sample(sample_shape)
self.assertEqual(y.shape.as_list(),
sample_shape + batch_shape + event_shape)
lp = model.log_prob(y)
self.assertEqual(lp.shape.as_list(), sample_shape + batch_shape)
# Try an argument with no batch shape to ensure we broadcast
# correctly.
unbatched_y = tf.random_normal(event_shape)
lp = model.log_prob(unbatched_y)
self.assertEqual(lp.shape.as_list(), batch_shape)
self.assertEqual(model.mean().shape.as_list(),
batch_shape + event_shape)
self.assertEqual(model.variance().shape.as_list(),
batch_shape + event_shape)
def test_constant_batch_shape(self):
"""Simple case where all components have the same batch shape."""
num_timesteps = 5
latent_size = 3
observation_size = 2
batch_shape = [3, 4]
event_shape = [num_timesteps, observation_size]
model = self._build_random_model(num_timesteps,
latent_size,
observation_size,
prior_batch_shape=batch_shape,
transition_matrix_batch_shape=batch_shape,
transition_noise_batch_shape=batch_shape,
observation_matrix_batch_shape=batch_shape,
observation_noise_batch_shape=batch_shape)
# check that we get the basic shapes right
self.assertEqual(model.latent_size, latent_size)
self.assertEqual(model.observation_size, observation_size)
self._sanity_check_shapes(model, batch_shape, event_shape)
def test_broadcast_batch_shape(self):
"""Broadcasting when only one component has batch shape."""
num_timesteps = 5
latent_size = 3
observation_size = 2
batch_shape = [3, 4]
event_shape = [num_timesteps, observation_size]
# Test batching only over the prior
model = self._build_random_model(num_timesteps,
latent_size,
observation_size,
prior_batch_shape=batch_shape)
self._sanity_check_shapes(model, batch_shape, event_shape)
# Test batching only over the transition op
model = self._build_random_model(num_timesteps,
latent_size,
observation_size,
transition_matrix_batch_shape=batch_shape)
self._sanity_check_shapes(model, batch_shape, event_shape)
# Test batching only over the transition noise
model = self._build_random_model(num_timesteps,
latent_size,
observation_size,
transition_noise_batch_shape=batch_shape)
self._sanity_check_shapes(model, batch_shape, event_shape)
# Test batching only over the observation op
model = self._build_random_model(num_timesteps,
latent_size,
observation_size,
observation_matrix_batch_shape=batch_shape)
self._sanity_check_shapes(model, batch_shape, event_shape)
# Test batching only over the observation noise
model = self._build_random_model(num_timesteps,
latent_size,
observation_size,
observation_noise_batch_shape=batch_shape)
self._sanity_check_shapes(model, batch_shape, event_shape)
def test_batch_shape_error(self):
# build a dist where components have incompatible batch
# shapes. this should cause a problem somehow.
pass
class _KalmanStepsTest(object):
def setUp(self):
# Define a simple model with 2D latents and 1D observations.
self.transition_matrix = np.asarray([[1., .5], [-.2, .3]], dtype=np.float32)
#!/usr/bin/env python
"""
Audio Feature Extractors
A set of algorithms for analyzing audio files. Most of the features are built
using building blocks from the Essentia audio and music analysis toolkit:
https://essentia.upf.edu/index.html
<NAME> - <EMAIL>
University of Victoria
"""
from abc import ABC, abstractmethod
import math
import numpy as np
from scipy.stats import norm, linregress
import essentia
import essentia.standard as es
import uvic_music_extractor.utils as utils
class ExtractorBase(ABC):
"""
Base class for audio feature extractors
:param sample_rate (int): rate to run extraction at
:param pooling (bool): indicates whether results of this extractor are summarized
over time using pooling.
:param stats (list): stats to run during pooling aggregation (if used).
"""
def __init__(self, sample_rate: float, pooling: bool = False, stats: list = None):
self.sample_rate = sample_rate
self.pooling = pooling
self.feature_names = []
if stats is None:
stats = ["mean", "stdev"]
self.stats = stats
@abstractmethod
def __call__(self, audio: np.ndarray):
"""
Abstract method -- must be implemented in inheriting classes
:param audio (np.ndarray): input audio to run feature extraction on
:return:
"""
pass
def get_headers(self, join="."):
"""
Get a list of the features combined with aggregation
:return: list
"""
if not self.pooling:
return self.feature_names
headers = []
for feature in self.feature_names:
for stat in self.stats:
headers.append("{}{}{}".format(feature, join, stat))
return headers
class Spectral(ExtractorBase):
"""
Spectral audio feature extraction.
:param sample_rate (int): rate to run extraction at
:param frame_size (int): size of frame to use for spectral processing
:param stats (list): stats to run during pooling aggregation (time summarization of
spectral results)
"""
def __init__(
self, sample_rate: float,
frame_size: float = 2048,
stats: list = None
):
super().__init__(sample_rate, pooling=True, stats=stats)
self.frame_size = frame_size
self.feature_names = [
"spectral_centroid",
"spectral_spread",
"spectral_skewness",
"spectral_kurtosis",
"spectral_flatness",
"spectral_entropy",
"rolloff_85",
"rolloff_95",
"harsh",
"energy_lf",
"dissonance",
"inharmonicity"
]
def __call__(self, audio: np.ndarray):
"""
Run audio
:param audio (np.ndarray): input audio
:return: feature matrix
"""
# Pooling for summarizing results over time
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
window = es.Windowing(type="hann", size=self.frame_size)
spectrum = es.Spectrum()
# Spectral feature extractors
centroid = es.Centroid(range=self.sample_rate/2)
central_moments = es.CentralMoments(range=self.sample_rate/2)
dist_shape = es.DistributionShape()
flatness = es.Flatness()
entropy = es.Entropy()
energy_band_harsh = es.EnergyBandRatio(sampleRate=self.sample_rate,
startFrequency=2000,
stopFrequency=5000)
energy_band_low = es.EnergyBandRatio(sampleRate=self.sample_rate,
startFrequency=20,
stopFrequency=80)
rolloff_85 = es.RollOff(cutoff=0.85, sampleRate=self.sample_rate)
rolloff_95 = es.RollOff(cutoff=0.95, sampleRate=self.sample_rate)
# Extractors for calculating dissonance and inharmonicity
peaks = es.SpectralPeaks()
dissonance = es.Dissonance()
pitch_yin = es.PitchYinFFT(frameSize=self.frame_size,
sampleRate=self.sample_rate)
harmonic_peaks = es.HarmonicPeaks()
inharmonicity = es.Inharmonicity()
# Frame-by-frame computation
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size // 2):
# Window frame and compute spectrum
win = window(frame)
spec = spectrum(win)
# Spectral feature extraction
sc = centroid(spec)
moments = central_moments(spec)
spread, skewness, kurtosis = dist_shape(moments)
spectral_flatness = flatness(spec)
spectral_entropy = entropy(spec)
harsh = energy_band_harsh(spec)
energy_lf = energy_band_low(spec)
roll85 = rolloff_85(spec)
roll95 = rolloff_95(spec)
# Spectral Peaks
peak_freqs, peak_mags = peaks(spec)
# Remove DC bin peak if it is present
if peak_freqs[0] == 0:
peak_freqs = peak_freqs[1:]
peak_mags = peak_mags[1:]
# Calculate dissonance and inharmonicity from peaks
dissonance_val = dissonance(peak_freqs, peak_mags)
pitch, _ = pitch_yin(spec)
harm_freqs, harm_mags = harmonic_peaks(peak_freqs, peak_mags, pitch)
inharm = inharmonicity(harm_freqs, harm_mags)
# Add to pool for summarization
keys = self.feature_names
pool.add(keys[0], sc)
pool.add(keys[1], spread)
pool.add(keys[2], skewness)
pool.add(keys[3], kurtosis)
pool.add(keys[4], spectral_flatness)
pool.add(keys[5], spectral_entropy)
pool.add(keys[6], roll85)
pool.add(keys[7], roll95)
pool.add(keys[8], harsh)
pool.add(keys[9], energy_lf)
pool.add(keys[10], dissonance_val)
pool.add(keys[11], inharm)
stats = pool_agg(pool)
results = [stats[feature] for feature in self.get_headers()]
return results
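# Example usage (a sketch; the file name and the 44.1 kHz rate are assumptions):
#   audio = es.MonoLoader(filename="mix.wav", sampleRate=44100)()
#   extractor = Spectral(sample_rate=44100)
#   features = dict(zip(extractor.get_headers(), extractor(audio)))
#   # e.g. features["spectral_centroid.mean"], features["rolloff_85.stdev"], ...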
class CrestFactor(ExtractorBase):
"""
Crest Factor Extractor
Peak-to-average ratio, where peak is the maximum amplitude level and
average is the RMS value.
https://en.wikipedia.org/wiki/Crest_factor
:param sample_rate (int): rate to run extraction at
:param frame_size (int): size of frame to use
:param stats (list): stats to run during pooling aggregation (time summarization)
"""
def __init__(
self,
sample_rate: float,
frame_size: float = None,
stats: list = None
):
super().__init__(sample_rate, pooling=frame_size is not None, stats=stats)
self.frame_size = frame_size
self.feature_names = ["crest_factor"]
def __call__(self, audio: np.ndarray):
"""
Run crest factor audio feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
rms = es.RMS()
minimum = es.MinMax(type='min')
maximum = es.MinMax(type='max')
if self.frame_size:
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size):
frame_rms = rms(frame)
frame_peak_min = minimum(frame)[0]
frame_peak_max = maximum(frame)[0]
frame_peak = max(abs(frame_peak_min), abs(frame_peak_max))
frame_crest = frame_peak / frame_rms
pool.add('crest_factor', frame_crest)
stats = pool_agg(pool)
crest_factor = [stats['crest_factor.{}'.format(stat)] for stat in self.stats]
else:
full_rms = rms(audio)
full_peak_min = minimum(audio)[0]
full_peak_max = maximum(audio)[0]
full_peak = max(abs(full_peak_min), abs(full_peak_max))
crest_factor = [full_peak / full_rms]
return crest_factor
class Loudness(ExtractorBase):
"""
Loudness Features
Loudness Range
--------------
Loudness range is computed from short-term loudness values. It is defined as the
difference between the estimates of the 10th and 95th percentiles of the
distribution of the loudness values with applied gating. See Essentia documentation
for more information: https://essentia.upf.edu/reference/std_LoudnessEBUR128.html
EBU Tech Doc 3342-2011. "Loudness Range: A measure to supplement loudness
normalisation in accordance with EBU R 128"
LDR_95, LDR_max, peak-to-loudness
--------------------------------
LDR is a measurement of microdynamics. It is computed by taking the difference
between loudness measurements using a fast integration time and a slow integration
time, then computing the maximum or 95 percentile value from those results.
Peak-to-loudness is computed by taking the ratio between the true peak amplitude
and the overall loudness.
<NAME>. "Measures of microdynamics." Audio Engineering Society
Convention 137. Audio Engineering Society, 2014.
top1db
------
Ratio of audio samples in the range [-1dB, 0dB]
<NAME>, et al. "Production effect: audio features for recording
techniques description and decade prediction." 2011.
:param sample_rate (int): rate to run extraction at
"""
def __init__(self, sample_rate: float):
super().__init__(sample_rate, pooling=False, stats=None)
self.feature_names = [
"loudness_range",
"microdynamics_95%",
"microdynamics_100%",
"peak_to_loudness",
"top1db"
]
def __call__(self, audio: np.ndarray):
"""
Run loudness / dynamics feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
loudness = es.LoudnessEBUR128(startAtZero=True, sampleRate=self.sample_rate)
loudness_stats = loudness(audio)
loudness_range = loudness_stats[3]
# Micro dynamics (LDR)
micro_dynamics = loudness_stats[0] - loudness_stats[1]
ldr_95 = np.percentile(micro_dynamics, 95.0)
ldr_max = micro_dynamics.max()
# True peak detection for peak to loudness calculation
true_peak_detector = es.TruePeakDetector(sampleRate=self.sample_rate)
true_peak_audio_l = true_peak_detector(audio[:, 0])[1]
true_peak_l = 20 * math.log10(true_peak_audio_l.max())
true_peak_audio_r = true_peak_detector(audio[:, 1])[1]
true_peak_r = 20 * math.log10(true_peak_audio_r.max())
# True peak to loudness
true_peak = max(true_peak_l, true_peak_r)
peak_to_loudness = true_peak / loudness_stats[2]
# Top 1 dB (ratio of samples in the top 1dB)
top_1db_gain = math.pow(10, -1.0 / 20.0)
top_1db_l = (true_peak_audio_l > top_1db_gain).sum()
top_1db_r = (true_peak_audio_r > top_1db_gain).sum()
top1db = (top_1db_l + top_1db_r) / (len(true_peak_audio_l) + len(true_peak_audio_r))
return [loudness_range, ldr_95, ldr_max, peak_to_loudness, top1db]
class DynamicSpread(ExtractorBase):
"""
Dynamic Spread Feature Extractor. Measure of the loudness spread across the audio
file. The difference between the loudness (using Vickers algorithm) for each frame
compared to the average loudness of the entire track is computed. Then, the average
of that is computed.
<NAME>. "Automatic long-term loudness and dynamics matching." Audio
Engineering Society Convention 111. Audio Engineering Society, 2001.
:param sample_rate (int): rate to run extraction at
:param frame_size (int): size of frame to use. Defaults to 2048.
"""
def __init__(
self,
sample_rate: float,
frame_size: float = 2048,
):
super().__init__(sample_rate, pooling=False, stats=None)
self.frame_size = frame_size
self.feature_names = ["dynamic_spread"]
def __call__(self, audio: np.ndarray):
"""
Run loudness feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
vickers_loudness = es.LoudnessVickers()
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=['mean'])
# Calculate the Vickers loudness frame by frame
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size):
frame_loudness = vickers_loudness(frame)
pool.add('vdb', frame_loudness)
# Compute the average loudness across frames
stats = pool_agg(pool)
vickers_mean = stats['vdb.mean']
# Compute the difference between loudness at each frame and the mean loudness
dynamic_spread = 0.0
for vdb in pool['vdb']:
dynamic_spread += abs(vdb - vickers_mean)
dynamic_spread /= len(pool['vdb'])
return [dynamic_spread]
class Distortion(ExtractorBase):
"""
Set of distortion features -- computes a probability density function on audio
samples using a histogram with 1001 bins. Several statistics are computed on the
resulting pdf including the centroid, spread, skewness, kurtosis, flatness, and
the 'gauss' feature. 'Gauss' is a measurement of the Gaussian fit of the pdf.
Wilson, Alex, and <NAME>. "Characterisation of distortion profiles in
relation to audio quality." Proc. of the 17th Int. Conference on Digital Audio
Effects (DAFx-14). 2014.
<NAME>., and <NAME>. "Perception & evaluation of audio quality in
music production." Proc. of the 16th Int. Conference on Digital Audio Effects
(DAFx-13). 2013.
:param sample_rate (int): rate to run extraction at
"""
def __init__(self, sample_rate: float):
super().__init__(sample_rate, pooling=False, stats=None)
self.feature_names = [
"pmf_centroid",
"pmf_spread",
"pmf_skewness",
"pmf_kurtosis",
"pmf_flatness",
"pmf_gauss"
]
def __call__(self, audio: np.ndarray):
"""
Run distortion feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
# Compute PDF of audio sample amplitudes
hist, edges = np.histogram(audio, bins=1001, range=(-1.0, 1.0), density=True)
hist = np.array(hist, dtype=np.float32)
# Analysis of PDF shape
centroid_calc = es.Centroid()
centroid = centroid_calc(hist)
central_moments = es.CentralMoments()
shape = es.DistributionShape()
cm = central_moments(hist)
spread, skewness, kurtosis = shape(cm)
flatness_calc = es.Flatness()
flatness = flatness_calc(hist)
# Compute the r-squared value of the Gaussian fit
mu, std = norm.fit(audio)
gauss = norm.pdf(np.linspace(-1.0, 1.0, 1001), mu, std)
_, _, rvalue, _, _ = linregress(gauss, hist)
r_squared = rvalue ** 2
return [centroid, spread, skewness, kurtosis, flatness, r_squared]
class StereoFeatures(ExtractorBase):
"""
Stereo Feature Extractor: Sides-to-mid ratio and left-right imbalance
<NAME>., et al. "An analysis and evaluation of audio features for multitrack
music mixtures." (2014).
:param sample_rate (int): rate to run extraction at
"""
def __init__(self, sample_rate: float):
super().__init__(sample_rate, pooling=False, stats=None)
self.feature_names = ["side_mid_ratio", "lr_imbalance"]
def __call__(self, audio: np.ndarray):
"""
Run stereo feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
sides = (audio[:, 0] - audio[:, 1]) ** 2
mids = (audio[:, 0] + audio[:, 1]) ** 2
sides_mid_ratio = sides.mean() / mids.mean()
left_power = (audio[:, 0] ** 2).mean()
right_power = (audio[:, 1] ** 2).mean()
lr_imbalance = (right_power - left_power) / (right_power + left_power)
return sides_mid_ratio, lr_imbalance
class PhaseCorrelation(ExtractorBase):
"""
Phase Correlation feature extraction. Calculates the correlation coefficient
between the left and right channel. If a frame_size of None is based in then the
calculation is performed on the entire audio signal. Otherwise, frame-by-frame
processing is computed using the frame_size number of samples and the results are
summarized using the passed in stats.
:param sample_rate (float): rate to run extraction at
:param frame_size (int): number of samples per frame for frame-by-frame processing.
If None then computation is performed over the entire input. Defaults to None.
:param stats (list): a list of strings indicating the stats to use during time
summarization. Only applied if frame-by-frame processing is computed.
"""
def __init__(
self,
sample_rate: float,
frame_size: int = None,
stats: list = None
):
super().__init__(sample_rate, pooling=frame_size is not None, stats=stats)
self.frame_size = frame_size
self.feature_names = ["phase_correlation"]
def __call__(self, audio: np.ndarray):
"""
Run phase correlation feature extraction.
:param audio: Input audio samples
:return: feature matrix
"""
if self.frame_size:
max_sample = audio.shape[0]
slice_indices = list(range(0, max_sample, self.frame_size))
slice_indices.append(max_sample)
pool = essentia.Pool()
for i in range(len(slice_indices) - 1):
x1 = slice_indices[i]
x2 = slice_indices[i + 1]
correlation_matrix = np.corrcoef(audio[x1:x2, 0], audio[x1:x2, 1])
phase_correlation = correlation_matrix[0, 1]
pool.add(self.feature_names[0], phase_correlation)
pool_agg = es.PoolAggregator(defaultStats=self.stats)
stats = pool_agg(pool)
phase_correlation = [stats["{}.{}".format(self.feature_names[0], stat)] for stat in self.stats]
else:
correlation_matrix = np.corrcoef(audio[:, 0], audio[:, 1])
phase_correlation = [correlation_matrix[0, 1]]
return phase_correlation
class StereoSpectrum(ExtractorBase):
"""
Stereo Spectrum Features. Panning features computed using spectrums from the left
and right audio channels. Returns features from the entire spectrum as well as
three subbands which include 0-250Hz, 250-2800Hz, and 2800+ Hz.
Tzanetakis, George, <NAME>, and <NAME>. "Stereo Panning Features for
Classifying Recording Production Style." ISMIR. 2007.
"""
def __init__(
self,
sample_rate: float,
frame_size: int = 2048,
hop_size: int = 1024,
stats: list = None
):
super().__init__(sample_rate, pooling=True, stats=stats)
self.frame_size = frame_size
self.hop_size = hop_size
self.low = 250
self.high = 2800
self.feature_names = ["sps_full", "sps_low", "sps_mid", "sps_high"]
def __call__(self, audio: np.ndarray):
"""
Run stereo spectrum feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
# Must be stereo audio
assert audio.shape[1] == 2
# Hanning window
window = np.hanning(self.frame_size)
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
# Bin numbers for each filter bank
low_bin = int((self.low / self.sample_rate) * self.frame_size)
assert low_bin <= int(self.frame_size / 2)
high_bin = int((self.high / self.sample_rate) * self.frame_size)
assert high_bin <= int(self.frame_size / 2)
for i in range(0, len(audio), self.hop_size):
# Get the windowed frame for each channel
samples = audio[i:i+self.frame_size, :]
frame_left = np.zeros(self.frame_size)
frame_left[:len(samples)] = samples[:, 0]
frame_right = np.zeros(self.frame_size)
frame_right[:len(samples)] = samples[:, 1]
# Apply window
frame_left *= window
frame_right *= window
X_left = np.fft.rfft(frame_left)
X_right = np.fft.rfft(frame_right)
stereo_spectrum = StereoSpectrum.compute_stereo_spectrum(X_left, X_right)
# Features
full = utils.rms(stereo_spectrum)
low = utils.rms(stereo_spectrum[:low_bin])
mid = utils.rms(stereo_spectrum[low_bin:high_bin])
high = utils.rms(stereo_spectrum[high_bin:])
pool.add(self.feature_names[0], full)
pool.add(self.feature_names[1], low)
pool.add(self.feature_names[2], mid)
pool.add(self.feature_names[3], high)
stats = pool_agg(pool)
results = [stats[feature] for feature in self.get_headers()]
return results
@staticmethod
def compute_stereo_spectrum(spectrum_left, spectrum_right):
"""
Computes the stereo panning features using left and right channel spectrums
:param spectrum_left: magnitude spectrum from the left channel
:param spectrum_right: magnitude spectrum from the right channel
:return: stereo spectrum features
"""
np.zeros_like(spectrum_left)
# Update the DC and Nyquist Bins
spectrum_left[0] = np.real(spectrum_left[0]) + 0j
spectrum_left[-1] = np.real(spectrum_left[-1]) + 0j
spectrum_right[0] = np.real(spectrum_right[0]) + 0j
spectrum_right[-1] = np.real(spectrum_right[-1]) + 0j
real_left = np.real(spectrum_left)
imag_left = np.imag(spectrum_left)
real_right = np.real(spectrum_right)
"""
GRUD.py
utilities for GRU-D on MIMIC-III
"""
# import dependecies
import pandas as pd
import numpy as np
import os
import glob
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import math
import time
class FilterLinear(nn.Module):
"""
As seen in https://github.com/zhiyongc/GRU-D/
"""
def __init__(self, in_features, out_features, filter_square_matrix, bias=True):
'''
filter_square_matrix : filter square matrix, whose each elements is 0 or 1.
'''
super(FilterLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
use_gpu = torch.cuda.is_available()
self.filter_square_matrix = None
if use_gpu:
self.filter_square_matrix = Variable(filter_square_matrix.cuda(), requires_grad=False)
else:
self.filter_square_matrix = Variable(filter_square_matrix, requires_grad=False)
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
# print(self.weight.data)
# print(self.bias.data)
def forward(self, input):
# print(self.filter_square_matrix.mul(self.weight))
return F.linear(input, self.filter_square_matrix.mul(self.weight), self.bias)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) \
+ ', bias=' + str(self.bias is not None) + ')'
class GRUD(nn.Module):
def __init__(self, input_size, cell_size, hidden_size, X_mean, device, output_last = False, fp16=False):
"""
With minor modifications from https://github.com/zhiyongc/GRU-D/
Recurrent Neural Networks for Multivariate Times Series with Missing Values
GRU-D: GRU exploit two representations of informative missingness patterns, i.e., masking and time interval.
cell_size is the size of cell_state.
Implemented based on the paper:
@article{che2018recurrent,
title={Recurrent neural networks for multivariate time series with missing values},
author={<NAME> and <NAME> <NAME> <NAME>},
journal={Scientific reports},
volume={8},
number={1},
pages={6085},
year={2018},
publisher={Nature Publishing Group}
}
GRU-D:
input_size: variable dimension of each time
hidden_size: dimension of hidden_state
mask_size: dimension of masking vector
X_mean: the mean of the historical input data
"""
super(GRUD, self).__init__()
self.hidden_size = hidden_size
self.delta_size = input_size
self.mask_size = input_size
# use_gpu = torch.cuda.is_available()
# if use_gpu:
# self.identity = torch.eye(input_size).cuda()
# self.zeros = Variable(torch.zeros(input_size).cuda())
# self.zeros_h = Variable(torch.zeros(self.hidden_size).cuda())
# self.X_mean = Variable(torch.Tensor(X_mean).cuda())
# else:
self.identity = torch.eye(input_size).to(device)
self.zeros = Variable(torch.zeros(input_size)).to(device)
self.zeros_h = Variable(torch.zeros(self.hidden_size)).to(device)
self.X_mean = Variable(torch.Tensor(X_mean)).to(device)
if fp16=='True':
self.identity.half()
self.zeros.half()
self.zeros_h.half()
self.X_mean = self.X_mean.half()
self.zl = nn.Linear(input_size + hidden_size + self.mask_size, hidden_size) # Wz, Uz are part of the same network. the bias is bz
self.rl = nn.Linear(input_size + hidden_size + self.mask_size, hidden_size) # Wr, Ur are part of the same network. the bias is br
self.hl = nn.Linear(input_size + hidden_size + self.mask_size, hidden_size) # W, U are part of the same network. the bias is b
self.gamma_x_l = FilterLinear(self.delta_size, self.delta_size, self.identity)
self.gamma_h_l = nn.Linear(self.delta_size, self.hidden_size) # this was wrong in available version. remember to raise the issue
self.output_last = output_last
self.fc = nn.Linear(self.hidden_size, 2)
def step(self, x, x_last_obsv, x_mean, h, mask, delta):
"""
Inputs:
x: input tensor
x_last_obsv: input tensor with forward fill applied
x_mean: the mean of each feature
h: the hidden state of the network
mask: the mask of whether or not the current value is observed
delta: the tensor indicating the number of steps since the last time a feature was observed.
Returns:
h: the updated hidden state of the network
"""
batch_size = x.shape[0]
dim_size = x.shape[1]
# print(self.zeros.dtype, delta.dtype)
# print(self.gamma_x_l(delta).dtype)
delta_x = torch.exp(-torch.max(self.zeros, self.gamma_x_l(delta))) #exponentiated negative rectifier
delta_h = torch.exp(-torch.max(self.zeros_h, self.gamma_h_l(delta))) # self.zeros became self.zeros_h to accommodate hidden size != input size
# print(x.shape) # 1, 533
# print(x_mean.shape)
# print(delta_x.shape)
# print(x_last_obsv.shape)
x = mask * x + (1 - mask) * (delta_x * x_last_obsv + (1 - delta_x) * x_mean)
h = delta_h * h
# print(x.shape) #534, 533
# print(h.shape) # 1,67
# print(mask.shape) # 1,533
combined = torch.cat((x, h, mask), 1)
z = torch.sigmoid(self.zl(combined)) #sigmoid(W_z*x_t + U_z*h_{t-1} + V_z*m_t + bz)
r = torch.sigmoid(self.rl(combined)) #sigmoid(W_r*x_t + U_r*h_{t-1} + V_r*m_t + br)
# print(x.shape, (r*h).shape, mask.shape)
new_combined=torch.cat((x, r*h, mask), 1)
h_tilde = torch.tanh(self.hl(new_combined)) #tanh(W*x_t +U(r_t*h_{t-1}) + V*m_t) + b
# h_tilde = torch.tanh(self.hl(combined)) #tanh(W*x_t +U(r_t*h_{t-1}) + V*m_t) + b
h = (1 - z) * h + z * h_tilde
return h
def forward(self, X, X_last_obsv, Mask, Delta, pad_mask=None, return_hidden=False):
batch_size = X.size(0)
# type_size = input.size(1)
step_size = X.size(1) # num timepoints
spatial_size = X.size(2) # num features
Hidden_State = self.initHidden(batch_size)
# X = torch.squeeze(input[:,0,:,:])
# X_last_obsv = torch.squeeze(input[:,1,:,:])
# Mask = torch.squeeze(input[:,2,:,:])
# Delta = torch.squeeze(input[:,3,:,:])
if pad_mask is not None:
pass
outputs = None
for i in range(step_size):
Hidden_State = self.step(torch.squeeze(X[:,i:i+1,:], 1)\
, torch.squeeze(X_last_obsv[:,i:i+1,:], 1)\
, torch.unsqueeze(self.X_mean, 0)\
, Hidden_State\
, torch.squeeze(Mask[:,i:i+1,:], 1)\
, torch.squeeze(Delta[:,i:i+1,:], 1))
if outputs is None:
outputs = Hidden_State.unsqueeze(1)
else:
# # this makes the sequence reversed
# outputs = torch.cat((Hidden_State.unsqueeze(1), outputs), 1)
#this preserves the order
outputs = torch.cat((outputs, Hidden_State.unsqueeze(1)), 1)
# print(outputs.shape)
if True:
#binary outcomes for all states
if return_hidden:
# print(self.fc(torch.squeeze(outputs, 0)).shape)
return self.fc(torch.squeeze(outputs, 0)), outputs
else:
# print(self.fc(torch.squeeze(outputs, 0)).shape)
return self.fc(torch.squeeze(outputs, 0))
# we want to predict a binary outcome
else:
# binary outcome for last state
return self.fc(Hidden_State)
# if self.output_last:
# return outputs[:,-1,:]
# else:
# return outputs
def initHidden(self, batch_size):
use_gpu = torch.cuda.is_available()
if use_gpu:
Hidden_State = Parameter(Variable(torch.zeros(batch_size, self.hidden_size).cuda()))
return Hidden_State
else:
Hidden_State = Parameter(Variable(torch.zeros(batch_size, self.hidden_size)))
return Hidden_State
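# Example usage (a sketch; the feature dimension, hidden size and tensor shapes below are
# illustrative assumptions, not values taken from this project):
#   device = torch.device("cpu")
#   x_mean = np.zeros(33, dtype=np.float32)
#   model = GRUD(input_size=33, cell_size=33, hidden_size=67, X_mean=x_mean, device=device)
#   # X, X_last_obsv, Mask, Delta: float tensors of shape (1, T, 33)
#   logits = model(X, X_last_obsv, Mask, Delta)   # per-timestep logits of shape (T, 2)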
class next_measurement(nn.Module):
"""
predict the next value
"""
def __init__(self, feat_dim, hidden_dim):
super(next_measurement, self).__init__()
#predict next outputs
self.fc1=nn.Linear(hidden_dim, 2048)
self.fc2=nn.Linear(2048, 2048)
self.fc3=nn.Linear(2048, feat_dim)
self.drop=nn.Dropout(p=0.1)
self.rl=nn.ReLU()
def forward(self, x):
x=self.drop(x)
x=self.rl(self.fc1(x))
x=self.rl(self.fc2(x))
x=self.rl(self.fc3(x))
return x
class icd10_head(nn.Module):
"""
Prediction head for ICD10
"""
def __init__(self, icd10_dim, hidden_dim):
super(icd10_head, self).__init__()
#predict next outputs
self.fc1=nn.Linear(hidden_dim, 2048)
self.fc2=nn.Linear(2048, 2048)
self.fc3=nn.Linear(2048, icd10_dim)
self.drop=nn.Dropout(p=0.1)
self.rl=nn.ReLU()
def forward(self, x):
x=self.drop(x)
x=self.rl(self.fc1(x))
x=self.rl(self.fc2(x))
x=self.rl(self.fc3(x))
return x
def train_GRUD(model, train_dataloader, val_dataloader, num_epochs = 300, patience = 3, min_delta = 0.00001, weight=None, multitask=False, multitask_weights=None,):
"""
"""
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
print('Model Structure: ', model)
print('Start Training ... ')
device = torch.device("cuda" if torch.cuda.is_available() and not False else "cpu")
n_gpu = torch.cuda.device_count()
if multitask:
_, _, _, _, _, icd10, sepsis, resp=next(iter(train_dataloader))
try:
hidden_dim=model.hidden_size
except:
hidden_dim=model.module.hidden_size
icd_code_head=icd10_head(icd10.shape[-1], hidden_dim)
icd_code_head.to(device)
if n_gpu>1:
icd_code_head = torch.nn.DataParallel(icd_code_head)
sepsis_head=icd10_head(2, hidden_dim)
sepsis_head.to(device)
if n_gpu>1:
sepsis_head = torch.nn.DataParallel(sepsis_head)
resp_head=icd10_head(2, hidden_dim)
resp_head.to(device)
if n_gpu>1:
resp_head = torch.nn.DataParallel(resp_head)
# soft=nn.Sigmoid()
# if (type(model) == nn.modules.container.Sequential):
# output_last = model[-1].output_last
# print('Output type dermined by the last layer')
# else:
# output_last = model.output_last
# print('Output type dermined by the model')
try:
output_last = model.output_last
except:
#data parallel
output_last = model.module.output_last
if weight is not None:
weight=weight.float().to(device)
# loss_MSE = torch.nn.MSELoss()
# loss_nll=torch.nn.NLLLoss()
loss_CEL=torch.nn.CrossEntropyLoss(reduction='mean', weight=weight)
if multitask:
loss_icd=torch.nn.MultiLabelSoftMarginLoss(reduction ='mean', weight=multitask_weights[0])
loss_sepsis=torch.nn.CrossEntropyLoss(reduction='mean', weight=multitask_weights[1])
loss_resp=torch.nn.CrossEntropyLoss(reduction='mean', weight=multitask_weights[2])
# loss_L1 = torch.nn.L1Loss()
learning_rate = 0.0001
optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate, alpha=0.99)
if multitask:
optimizer = torch.optim.RMSprop(list(model.parameters())+list(icd_code_head.parameters())+list(sepsis_head.parameters())+list(resp_head.parameters()), lr = learning_rate, alpha=0.99)
# base_params=[]
# for name, param in model.named_parameters():
# if param.requires_grad & ('fc' not in name):
# #not the fully connected head
# base_params.append(param)
# multitask_optimizer = torch.optim.RMSprop(base_params+list(icd_code_head.parameters()), lr = learning_rate, alpha=0.99)
use_gpu = torch.cuda.is_available()
interval = 100
cur_time = time.time()
pre_time = time.time()
proba_projection=torch.nn.Softmax(dim=1)
# Variables for Early Stopping
is_best_model = 0
patient_epoch = 0
for epoch in range(num_epochs):
losses_epoch_train = []
losses_epoch_valid = []
### TRAIN ---------------------------------------------------------
model.train()
if multitask:
icd_code_head.train()
sepsis_head.train()
resp_head.train()
batches=tqdm(train_dataloader, desc='step', total=len(train_dataloader))
for batch in batches:
batch = tuple(t.to(device) for t in batch)
measurement, measurement_last_obsv, mask, time_, labels, icd10, sepsis_labels, resp_labels = batch
for item in batch:
if np.sum(np.sum(np.sum(np.isnan(item.cpu().data.numpy()))))>0:
print("Nans")
optimizer.zero_grad()
# if multitask:
# multitask_optimizer.zero_grad()
prediction, hidden_states=model(measurement.float(), measurement_last_obsv.float(), mask.float(), time_.float(), return_hidden=True)
# get hidden_state for additional losses
if multitask:
icd10_prediction=icd_code_head(hidden_states)
sepsis_prediction=sepsis_head(hidden_states)
resp_prediction=resp_head(hidden_states)
# add next sequence prediction
# add next measurement prediction
if output_last:
loss_main = loss_CEL(prediction.view(measurement.shape[1],2), labels.long().squeeze(0))
loss_train = loss_main
else:
full_labels = torch.cat((inputs[:,1:,:], labels.long()), dim = 1)
loss_train = loss_MSE(outputs, full_labels)
if multitask:
# print('icd10_prediction: ', icd10_prediction.shape, icd10.shape)
# print('icd10_prediction queezed: ', torch.squeeze(icd10_prediction, 0).shape, torch.unsqueeze(torch.squeeze(icd10), 0).expand_as(torch.squeeze(icd10_prediction, 0)).float().shape)
loss_from_icd=loss_icd(torch.squeeze(icd10_prediction, 0), torch.unsqueeze(torch.squeeze(icd10), 0).expand_as(torch.squeeze(icd10_prediction, 0)).float())
loss_train+=loss_from_icd
loss_from_sepsis=loss_sepsis(sepsis_prediction.view(measurement.shape[1],2), torch.squeeze(sepsis_labels.long(), 0))
loss_train+=loss_from_sepsis
loss_from_resp=loss_resp(resp_prediction.view(measurement.shape[1],2), torch.squeeze(resp_labels.long(), 0))
loss_train+=loss_from_resp
# loss_train.backward(retain_graph=True)
# loss_train_multitask.backward()
else:
pass
loss_train.backward()
optimizer.step()
# if multitask:
# multitask_optimizer.step()
if multitask:
losses_epoch_train.append((loss_main.cpu().data.numpy(),
loss_from_icd.cpu().data.numpy(),
loss_from_sepsis.cpu().data.numpy(),
loss_from_resp.cpu().data.numpy()))
else:
losses_epoch_train.append(loss_main.cpu().data.numpy())
batches.set_description("{:02f}".format(loss_train.cpu().data.numpy()))
### VALIDATION ---------------------------------------------------------
model.eval()
if multitask:
icd_code_head.eval()
sepsis_head.eval()
resp_head.eval()
# batches=tqdm(enumerate(val_dataloader), desc='step', total=len(val_dataloader))
labels=[]
scores=[]
for i, batch in enumerate(val_dataloader):
batch = tuple(t.to(device) for t in batch)
measurement_val, measurement_last_obsv_val, mask_val, time_val, labels_val, icd10, sepsis_labels, resp_labels = batch
# print(measurement_val.shape, measurement_last_obsv_val.shape, mask_val.shape, time_val.shape)
# print(measurement.shape, measurement_last_obsv.shape, mask.shape, time_.shape)
with torch.no_grad():
prediction_val, hidden_states = model(measurement_val.float(), measurement_last_obsv_val.float(), mask_val.float(), time_val.float(), return_hidden=True)
scores.append(proba_projection(prediction_val).detach().cpu().numpy()) # I just learned how to spell detach
labels.append(labels_val.detach().cpu().numpy())
if output_last:
loss_valid =loss_CEL(prediction_val.view(measurement_val.shape[1],2), labels_val.long().squeeze(0))
else:
full_labels_val = torch.cat((inputs_val[:,1:,:], labels_val.long()), dim = 1)
loss_valid = loss_MSE(outputs_val, full_labels_val.long())
if multitask:
icd10_prediction=icd_code_head(hidden_states)
sepsis_prediction=sepsis_head(hidden_states)
resp_prediction=resp_head(hidden_states)
# keep the main (classification) validation loss before the auxiliary task terms are added
loss_valid_main = loss_valid
loss_from_icd=loss_icd(torch.squeeze(icd10_prediction, 0), torch.unsqueeze(torch.squeeze(icd10), 0).expand_as(torch.squeeze(icd10_prediction, 0)).float())
loss_valid = loss_valid + loss_from_icd
loss_from_sepsis=loss_sepsis(sepsis_prediction.view(measurement_val.shape[1],2), torch.squeeze(sepsis_labels.long(), 0))
loss_valid = loss_valid + loss_from_sepsis
loss_from_resp=loss_resp(resp_prediction.view(measurement_val.shape[1],2), torch.squeeze(resp_labels.long(), 0))
loss_valid = loss_valid + loss_from_resp
losses_epoch_valid.append((loss_valid_main.cpu().data.numpy(),
loss_from_icd.cpu().data.numpy(),
loss_from_sepsis.cpu().data.numpy(),
loss_from_resp.cpu().data.numpy()))
else:
losses_epoch_valid.append(loss_valid.cpu().data.numpy())
# # compute the loss
# labels.append(label.view(-1,).squeeze().cpu().data.numpy())
# scores.append(m(classfication).view(-1).squeeze().cpu().data.numpy())
# print(sklearn.metrics.roc_auc_score(labels_val.detach().cpu().numpy(), prediction_val.detach().cpu().numpy()[:,1]))
try:
# print("garbage")
# print(np.asarray(losses_epoch_valid).shape)
# print("success")
avg_losses_epoch_valid = np.mean(np.asarray(losses_epoch_valid))
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 12:04:18 2015
@author: fcaldas
"""
import numpy as np
import matplotlib as pl
import matplotlib.pyplot as plt
import bisect
from scipy import io
from sklearn import metrics
from numba import jit
def generate_pairs(label, n_pairs, positive_ratio, random_state=41):
"""Generate a set of pair indices
Parameters
----------
label : array, shape (n_samples, 1)
Label vector
n_pairs : int
Number of pairs to generate
positive_ratio : float
Positive to negative ratio for pairs
random_state : int
Random seed for reproducibility
Output
------
pairs_idx : array, shape (n_pairs, 2)
The indices for the set of pairs
label_pairs : array, shape (n_pairs, 1)
The pair labels (+1 or -1)
"""
rng = np.random.RandomState(random_state)
n_samples = label.shape[0]
pairs_idx = np.zeros((n_pairs, 2), dtype=int)
pairs_idx[:, 0] = rng.randint(0, n_samples, n_pairs)  # draw from rng so random_state controls reproducibility
rand_vec = rng.rand(n_pairs)
for i in range(n_pairs):
if rand_vec[i] <= positive_ratio:
idx_same = np.where(label == label[pairs_idx[i, 0]])[0]
while idx_same.shape[0] == 1:
pairs_idx[i, 0] = rng.randint(0,n_samples)
idx_same = np.where(label == label[pairs_idx[i, 0]])[0]
idx2 = rng.randint(idx_same.shape[0])
pairs_idx[i, 1] = idx_same[idx2]
while pairs_idx[i, 1] == pairs_idx[i, 0]:
idx2 = rng.randint(idx_same.shape[0])
pairs_idx[i, 1] = idx_same[idx2]
else:
idx_diff = np.where(label != label[pairs_idx[i, 0]])[0]
idx2 = rng.randint(idx_diff.shape[0])
pairs_idx[i, 1] = idx_diff[idx2]
pairs_label = 2.0 * (label[pairs_idx[:, 0]] == label[pairs_idx[:, 1]]) - 1.0
return pairs_idx, pairs_label
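# Illustrative usage sketch (not part of the original script): build a toy label
# vector with 5 classes and sample 200 index pairs from it. The sizes and the
# positive ratio below are arbitrary example values.
def _demo_generate_pairs():
    toy_labels = np.repeat(np.arange(5), 20)          # 100 samples, 5 classes
    pairs_idx, pairs_label = generate_pairs(toy_labels, n_pairs=200,
                                            positive_ratio=0.5, random_state=0)
    # pairs_idx has shape (200, 2); pairs_label is +1 for same-class pairs, -1 otherwise
    print(pairs_idx.shape, np.mean(pairs_label == 1))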
@jit
def update(X_i, X_j, A, y, u, l, gamma):
diff = X_i - X_j
d = np.dot(diff, np.dot(A , diff))
if (d >u and y == 1) or (d < l and y == -1):
target = u * (y == 1) + l * (y == -1)
_y = ( (gamma * d * target - 1) + np.sqrt((gamma * d * target - 1) ** 2 + 4 * gamma * d * d) )/(2 * gamma * d)
return A - ((gamma * (_y - target)) / (1 + gamma * (_y - target) * d)) * np.outer(np.dot(A, diff), np.dot(A, diff))
else :
return A
@jit
def A_dist_pairs(X , A, pairs):
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
for i in range(n_pairs):
diff = X[pairs[i, 0], :] - X[pairs[i, 1], :]
dist[i] = np.dot(diff , np.dot(A , diff))
    return np.sqrt(dist)
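# End-to-end sketch (illustrative only, not from the original script): learn a
# Mahalanobis matrix online from generated pairs and evaluate the pair distances.
# The feature matrix X, the margins (u=1.0, l=2.0) and gamma=0.2 are arbitrary
# toy values chosen for the example.
def _demo_metric_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    y = np.repeat(np.arange(5), 20)
    pairs_idx, pairs_label = generate_pairs(y, n_pairs=500, positive_ratio=0.5)
    A = np.eye(10)
    for (i, j), lab in zip(pairs_idx, pairs_label):
        # positional args: X_i, X_j, A, y, u, l, gamma
        A = update(X[i], X[j], A, lab, 1.0, 2.0, 0.2)
    print(A_dist_pairs(X, A, pairs_idx)[:5])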
import numpy as np
import Alice
class Network(object):
def __init__(self,sizes,eta=0.01):
self.eta = eta
self.num_layers = len(sizes)
self.sizes = sizes
self.weights = [np.random.randn(x,y) for x,y in zip(sizes[:-1], sizes[1:])]
self.biases = [np.random.randn(1,x) for x in sizes[1:]]
############################################################################
############# NETWORK ######################################################
    # function to compute the network loss
    # y = network output
    # o = expected output
def __loss(self,y,o):
return Alice.mse(y,o)
def __d_loss(self,y,o):
return Alice.mse_derivate(y,o)
    # function to select the neuron output
def __selector(self,z):
return Alice.softmax(z)
def __d_selector(self,z,alpha):
return Alice.softmax_derivate(z,alpha)
    # activation function for each neuron
def __activation(self,z):
return Alice.sigmoid2(z)
def __d_activation(self,z,alpha):
return Alice.sigmoid2_derivate(z,alpha)
    # function that takes the input from the entire previous layer
def __layer(self,x,w,b):
return Alice.dotMatrix(x,w,b)
def __d_layer(self,x,w,alpha):
return Alice.dotMatrix_derivate(x,w,alpha)
def __feedForward(self,x):
for w, b in zip(self.weights,self.biases):
x = self.__layer(self.__activation(x),w,b)
#x = self.__activation(self.__layer(x,w,b))
return self.__selector(x)
def __backPropagation(self,x,target):
# feedForward
z = [x] # save all Zs
activations = [] # save all activations
for w, b in zip(self.weights,self.biases):
x = self.__layer(x,w,b)
activations.append(x)
x = self.__activation(x)
z.append(x)
y = self.__selector(x)
derror = self.__d_loss(y,target)
derror = self.__d_selector(z[self.num_layers - 1],derror)
for l in range(1, self.num_layers):
w = self.weights[-l]
b = self.biases[-l]
derror = self.__d_activation(activations[-l],derror)
nabla_w = z[-l-1].transpose().dot(derror) # error for each wij
nabla_b = derror # error for each bias
derror = self.__d_layer(z[-l-1],w,derror)
self.weights[-l] = self.weights[-l] - (self.eta * nabla_w)
self.biases[-l] = self.biases[-l] - (self.eta * nabla_b)
def send(self, l):
x = self.__activation(np.array([l]))
return self.__feedForward(x)[0]
def learn(self,x,y):
x = self.__activation(np.array([x]))
        y = np.array([y])
# -*- coding: utf-8 -*-
# This is a simplified version of the backtest that is used on the https://bcsquants.com server
# Developer <NAME> <<EMAIL>>
from __future__ import print_function
from datetime import datetime as dt
import numpy as np
import csv
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as plotticker
orderList = []
orderEvent = False
tickSizeToSeconds = {'s1': 1, 's5': 5, 'm1': 60, 'm5': 300}
dataKeys = ['time', 'open', 'high', 'low', 'close', 'volume', 'count']
tickKeys = ['direct', 'takeProfit', 'stopLoss', 'holdPeriod', 'datetime']
FEE = 0.0002  # commission
allTickers = ['ALRS', 'SNGS', 'MGNT', 'ROSN', 'MOEX', 'VTBR', 'LKOH', 'GAZP', 'SBERP', 'SBER', # stocks
              'USD000UTSTOM', # currency
              'RTSI', 'MICEXINDEXCF', # indices
              'GZX', 'SIX', 'BRX'] # futures
_tickSize = 'm5'
def showBacktestResult(result):
return pd.DataFrame(result, index=[x['ticker'] for x in result],
columns=['sumProcent', 'maxDrawdown', 'std',
'minV', 'numDeals',
'sumTakeProfit', 'sumHoldPeriod', 'sumStopLoss'])
def getBacktestResult(init, tick, tickers=allTickers, skipMessage=False, progressBar=True):
result = []
if not isinstance(tickers, list):
tickers = [tickers]
for ticker in tickers:
orderList, orderEvent = [], False
_tickSize, orderList, data = runTick(init, tick, ticker)
res = runOrder(ticker, _tickSize, orderList, data)
res['ticker'] = ticker
res['_tickSize'] = _tickSize
result.append(res)
if progressBar:
print(ticker, end='\n' if ticker == tickers[-1] else ', ')
if not skipMessage:
        print('Remember that you can inspect the ticks and orders in the corresponding files')
print('tickFile = data/order/TICKER_{0}_tick.csv'.format(_tickSize))
print('orderFile = data/order/TICKER_{0}_order.csv'.format(_tickSize))
return result
def order(direct, takeProfit, stopLoss, holdPeriod):
global orderList, orderEvent, tickSizeToSeconds, _tickSize
if not isinstance(holdPeriod, int):
raise Exception('Hold period must be int type. If you use division with operator /, ' +
'remember in python3 this operation converts result to float type, ' +
'use // instead or convert to int directly')
if holdPeriod * tickSizeToSeconds[_tickSize] < 300:
raise Exception('Hold period must be not less than 300 seconds')
if takeProfit < 0.0004:
raise Exception('Take profit must be not less than 0.0004')
if stopLoss < 0.0004:
raise Exception('Stop loss must be not less than 0.0004')
orderList.append([direct, takeProfit, stopLoss, holdPeriod])
orderEvent = True
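# Illustrative strategy sketch (not part of the original module): a minimal
# init/tick pair in the format expected by getBacktestResult. The window size,
# take-profit/stop-loss thresholds and hold period are arbitrary example values.
def _example_init(self):
    self._tickSize = 'm5'
    self._window = 10
def _example_tick(self, data):
    # buy when the last close is above the average close of the window
    if data['close'][-1] > np.mean(data['close']):
        order('buy', takeProfit=0.002, stopLoss=0.002, holdPeriod=12)
# Example call (assumes the data/ folder layout used by runTick is present):
# result = getBacktestResult(_example_init, _example_tick, tickers='GAZP')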
def runTick(init, tick, ticker):
global orderList, orderEvent, _tickSize
orderList, orderEvent = [], False
class Empty:
pass
self = Empty()
init(self)
_tickSize = getattr(self, '_tickSize', 'm5')
_window = getattr(self, '_window', None)
if _window is not None:
_window = int(_window)
data = {key: np.load('data/{0}/{1}/{2}.npy'.format(ticker, _tickSize, key), encoding='bytes') for key in dataKeys }
for ind in range(1, len(data['time'])):
if _window:
if ind < _window:
continue
else:
tick(self, { key: data[key][ind - _window:ind] for key in dataKeys })
else:
tick(self, { key: data[key][:ind] for key in dataKeys })
if orderEvent:
for jnd in range(len(orderList) - 1, -1, -1): # [len(orderList) - 1, ..., 0]
if len(orderList[jnd]) == 4:
orderList[jnd].append(data['time'][ind])
else:
break
orderEvent = False
with open('data/order/{0}_{1}_tick.csv'.format(ticker, _tickSize), 'w') as file:
file.write(';'.join(tickKeys) + '\n')
for order in orderList:
file.write(';'.join([str(elem) for elem in order]) + '\n')
return _tickSize, orderList, data
def runOrder(ticker, _tickSize, orderList, dataNpy):
measure = {'deals': [], 'sumProcent': 0.0, 'sumTakeProfit': 0, 'sumStopLoss': 0, 'sumHoldPeriod': 0, 'numDeals': 0}
currentDataNum, firstTime, preLastCandle = -1, True, False
for order in orderList:
if preLastCandle:
break
order = dict(zip(tickKeys, order))
mode = 'findOrder'
if firstTime or data['time'] <= order['datetime']:
while (not preLastCandle) and mode != 'Exit':
currentDataNum += 1
if currentDataNum >= len(dataNpy['time']) - 2:
preLastCandle = True
data = {key: dataNpy[key][currentDataNum] for key in dataKeys}
if mode == 'findOrder':
if data['time'] >= order['datetime']:
priceEnter = data['close']
numEnter = currentDataNum
datetimeEnter = data['time']
mode = 'doOrder'
elif mode == 'doOrder':
currentDatetime = data['time']
procentUp = data['high'] / priceEnter - 1.
procentDown = data['low'] / priceEnter - 1.
holdPeriod = order['holdPeriod']
isHoldPeriod = preLastCandle or (currentDataNum - numEnter + 1 > holdPeriod)
if order['direct'] == 'buy':
takeProfit = (procentUp >= order['takeProfit'])
stopLoss = (procentDown <= -order['stopLoss'])
else: # order['direct'] == 'sell
takeProfit = (procentDown <= -order['takeProfit'])
stopLoss = (procentUp >= order['stopLoss'])
if takeProfit or stopLoss or isHoldPeriod:
event = 'holdPeriod'
nextDatetime = dataNpy['time'][currentDataNum + 1]
nextClose = dataNpy['close'][currentDataNum + 1]
direct = {'buy': 1, 'sell': -1}[order['direct']]
procent = (nextClose / priceEnter - 1.) * direct - 2 * FEE
if takeProfit:
event = 'takeProfit'
if stopLoss:
event = 'stopLoss'
measure['deals'].append({
'procent': procent,
'event': event,
'direct': order['direct'],
'datetimeEnter': datetimeEnter,
'datetimeExit': nextDatetime,
'priceEnter': priceEnter,
'priceExit': nextClose,
'datetimeEnterInd': numEnter,
'datetimeExitInd': currentDataNum + 1,
})
mode = 'Exit'
firstTime = False
mapEventDirect = {'takeProfit': 'sumTakeProfit', 'holdPeriod': 'sumHoldPeriod', 'stopLoss': 'sumStopLoss'}
portfolio = []
for deal in measure['deals']:
portfolio.append(deal['procent'])
measure[mapEventDirect[deal['event']]] += deal['procent']
def calcMeasures(deals):
def maxDrawdown(array):
i = np.argmax(np.maximum.accumulate(array) - array) # end of the period
if i == 0:
return 0
j = np.argmax(array[:i]) # start of period
return array[j] - array[i]
res = {};
pnl = np.cumsum(deals)
res['std'] = np.std(pnl)
res['minV'] = min(np.min(pnl), 0)
res['maxDrawdown'] = maxDrawdown(pnl)
res['sumProcent'] = pnl[-1]
res['numDeals'] = len(portfolio)
return res
measure['sumProcent'] = measure['minV'] = measure['maxDrawdown'] = 0
measure['std'] = measure['numDeals'] = 0
if portfolio:
measureTest = calcMeasures(portfolio)
measure.update(measureTest)
toCSV = [deal for deal in measure['deals']]
fieldnames = ['datetimeEnter', 'direct', 'priceEnter', 'procent', 'event', 'datetimeExit', 'priceExit']
with open('data/order/{0}_{1}_order.csv'.format(ticker, _tickSize), 'w') as output_file:
dict_writer = csv.DictWriter(output_file, fieldnames=fieldnames, delimiter=';', extrasaction='ignore')
dict_writer.writeheader()
dict_writer.writerows(toCSV)
return measure
def plotChart(result, ticker):
for res in result:
if res['ticker'] == ticker:
break
_tickSize = res['_tickSize']
data = {key: np.load('data/{0}/{1}/{2}.npy'.format(ticker, _tickSize, key), encoding='bytes') for key in dataKeys }
N = len(data['time'])
ind = | np.arange(N) | numpy.arange |
# SpectralTools.py
# Collection of useful tools for dealing with spectra:
from __future__ import division
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
def BERVcorr(wl, Berv):
"""Barycentric Earth Radial Velocity correction from tapas.
A wavelength W0 of the original spectrum is seen at wavelength W1 in the spectrometer stretched by Doppler effect:
W1 / W0 = (1+ Vr/c) =(1- Berv/c)
Therefore, if there is a BERV correction in the pipeline of the spectrometer to recover W0, the measure wavelength
W1 has be transformed in W0 with the formula:
W0 = W1 (1+ Berv/c)
Inputs:
W1 - the spctrum in spectrograph frame.
Berv - The Barycentric Earth Radial Velocity value for this observation in km/s
Output:
W0. - BERV corrected wavelength values
Note:
pyasl.dopplerShift is much smoother than this function so use that instead.
"""
c = 299792.458 # km/s
return wl * (1 + Berv / c)
def inverse_BERV(w1, Berv):
"""Obtain un-BERV corrected wavelengths."""
c = 299792.458 # km/s
return w1 * (1 - Berv / c)
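# Quick illustrative check (not part of the original module): a BERV of +30 km/s
# shifts 2120 nm by roughly 2120 * 30 / c ≈ 0.21 nm, and inverse_BERV approximately
# undoes the correction (to first order in Berv/c). The wavelengths are arbitrary.
def _demo_berv_correction():
    wl = np.array([2110.0, 2120.0, 2130.0])   # nm
    wl_corr = BERVcorr(wl, 30.0)
    print(wl_corr - wl)                        # ~0.21 nm shifts
    print(np.allclose(inverse_BERV(wl_corr, 30.0), wl, atol=1e-3))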
# def dopplershift(): # standard doppler correction
# return None
def air2vac(air):
"""Conversion of air wavelenghts to vacuum wavelenghts.
Adapted from wcal from Pat Hall's IRAF tasks for displaying SDSS spectra
Input: Air wavelengths in nm
Output: Vacuum wavelengths in nm
"""
print("Probably best to use pyastronomy versions !!!!!!!!!!!!!!!!!!!!!!!!")
air_in_angstroms = air * 10
    sigma2 = (10**8) / air_in_angstroms**2
    n = 1 + 0.000064328 + 0.0294981 / (146 - sigma2) + 0.0002554 / (41 - sigma2)
    vacuum_in_angstroms = air_in_angstroms * n
if (min(air_in_angstroms) < 1600):
print("# WARNING! formula intended for use only at >1600 Ang!")
return vacuum_in_angstroms / 10
def vac2air(vac):
"""Conversion of vacuum wavelenghts to air wavelenghts.
From http://classic.sdss.org/dr7/products/spectra/vacwavelength.html
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4) given in Morton (1991, ApJS, 77, 119)
Better correction for infrared may be found in
http://adsabs.harvard.edu/abs/1966Metro...2...71E
and
http://adsabs.harvard.edu/abs/1972JOSA...62..958P
"""
print("Probably best to use pyastronomy versions !!!!!!!!!!!!!!!!!!!!!!!!")
vac_in_angstroms = vac * 10
air_in_angstroms = vac_in_angstroms / (1.0 + 2.735182E-4 + 131.4182 / vac_in_angstroms**2 +
2.76249E8 / vac_in_angstroms**4)
# Need to look at these for nir compatbile formular
return air_in_angstroms / 10
def wav_selector(wav, flux, wav_min, wav_max, verbose=False):
"""Fast Wavelength selector between wav_min and wav_max values.
If passed lists it will return lists.
If passed np arrays it will return arrays
"""
if isinstance(wav, list): # If passed lists
wav_sel = [wav_val for wav_val in wav if (wav_min < wav_val < wav_max)]
flux_sel = [flux_val for wav_val, flux_val in zip(wav, flux) if (wav_min < wav_val < wav_max)]
elif isinstance(wav, np.ndarray):
# Super Fast masking with numpy
mask = (wav > wav_min) & (wav < wav_max)
wav_sel = wav[mask]
flux_sel = flux[mask]
if verbose:
print("mask=", mask)
print("len(mask)", len(mask))
print("wav", wav)
print("flux", flux)
else:
raise TypeError("Unsupported input wav type")
return [wav_sel, flux_sel]
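# Illustrative usage sketch (not part of the original module): keep only the
# 2110-2120 nm slice of a toy spectrum; works for both lists and numpy arrays.
def _demo_wav_selector():
    wav = np.linspace(2100.0, 2160.0, 601)
    flux = np.ones_like(wav)
    wav_sel, flux_sel = wav_selector(wav, flux, 2110.0, 2120.0)
    print(wav_sel.min(), wav_sel.max(), len(flux_sel))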
# Wavelength interpolation from telluric correction
def wl_interpolation(wl, spec, ref_wl, method="scipy", kind="linear", verbose=False):
"""Interpolate Wavelengths of spectra to common WL.
Most likely convert telluric to observed spectra wl after wl mapping performed
"""
v_print = print if verbose else lambda *a, **k: None
starttime = time.time()
if method == "scipy":
v_print(kind + " scipy interpolation")
linear_interp = interp1d(wl, spec, kind=kind)
new_spec = linear_interp(ref_wl)
elif method == "numpy":
        if kind.lower() != "linear":
v_print("Warning: Cannot do " + kind + " interpolation with numpy, switching to linear")
v_print("Linear numpy interpolation")
        new_spec = np.interp(ref_wl, wl, spec)  # 1-d piecewise linear interpolation
else:
v_print("Method was given as " + method)
raise("Interpolation method not correct")
v_print("Interpolation Time = " + str(time.time() - starttime) + " seconds")
    return new_spec  # test interpolations
###################################################################
# Convolution
###################################################################
def unitary_Gauss(x, center, FWHM):
"""Gaussian_function of area=1.
p[0] = A
p[1] = mean
p[2] = FWHM
"""
sigma = np.abs(FWHM) / (2 * np.sqrt(2 * np.log(2)))
amp = 1.0 / (sigma * np.sqrt(2 * np.pi))
tau = -((x - center)**2) / (2 * (sigma**2))
result = amp * np.exp(tau)
return result
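# Sanity-check sketch (not part of the original module): sampled finely over a
# wide window, the Gaussian should integrate to approximately 1 for any FWHM.
def _demo_unitary_gauss_area():
    x = np.linspace(-5.0, 5.0, 10001)
    g = unitary_Gauss(x, center=0.0, FWHM=1.0)
    print(np.trapz(g, x))   # ~1.0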
def fast_convolve(wav_val, R, wav_extended, flux_extended, fwhm_lim):
"""IP convolution multiplication step for a single wavelength value."""
FWHM = wav_val / R
index_mask = (wav_extended > (wav_val - fwhm_lim * FWHM)) & (wav_extended < (wav_val + fwhm_lim * FWHM))
flux_2convolve = flux_extended[index_mask]
IP = unitary_Gauss(wav_extended[index_mask], wav_val, FWHM)
sum_val = np.sum(IP * flux_2convolve)
unitary_val = np.sum(IP * np.ones_like(flux_2convolve)) # Effect of convolution onUnitary.
return sum_val / unitary_val
def instrument_convolution(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=True, verbose=True):
"""Convolution code adapted from pedros code and speed up with np mask logic."""
# CRIRES HDR vals for chip limits don't match well with calibrated values (get interpolation out of range error)
# So will use limits from the obs data instead
# wav_chip, flux_chip = chip_selector(wav, flux, chip)
wav_chip, flux_chip = wav_selector(wav, flux, chip_limits[0], chip_limits[1])
# we need to calculate the FWHM at this value in order to set the starting point for the convolution
FWHM_min = wav_chip[0] / R # FWHM at the extremes of vector
FWHM_max = wav_chip[-1] / R
# wide wavelength bin for the resolution_convolution
wav_extended, flux_extended = wav_selector(wav, flux, wav_chip[0] - fwhm_lim * FWHM_min,
wav_chip[-1] + fwhm_lim * FWHM_max, verbose=False)
# isinstance check is ~100 * faster then arraying the array again.
if not isinstance(wav_extended, np.ndarray):
        wav_extended = np.array(wav_extended, dtype="float64")
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 09:57:52 2017
@author: Lab41: Github: Circulo/circulo/algorithms/rolx.py
#### https://github.com/Lab41/Circulo/blob/master/circulo/algorithms/rolx.py
Set of functions to compute the RolX featurization
"""
import sys
import math
import igraph
import numpy as np
from numpy.linalg import lstsq
from numpy import dot
from scipy.cluster.vq import kmeans2, vq
from scipy.linalg import norm
from scipy.optimize import minimize
from sklearn.decomposition import NMF
import networkx as nx
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import sklearn as sk
import pandas as pd
import torch
from utils.utils import read_real_datasets, NodeClassificationDataset, MLP, DataSplit
def extract_rolx_roles(G, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
"""
print("Creating Vertex Features matrix")
V = vertex_features(G)
#print("V is a %s by %s matrix." % V.shape)
basis, coef = get_factorization(V, roles)
H = basis
#print("Node-role matrix is of dimensions %s by %s" % H.shape)
#print(H)
K = make_sense(G, H)
#print("Role-feature matrix is of dimensions %s by %s" % K.shape)
#print(K)
return H, K
def extract_rolx_roles_bis(G,V, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
Inputs a matrux
"""
basis, coef = get_factorization(V, roles)
H = basis
print("Node-role matrix is of dimensions %s by %s" % H.shape)
#print(H)
K = make_sense(G, H)
print("Role-feature matrix is of dimensions %s by %s" % K.shape)
#print(K)
return H, K
def recursive_feature(G, f, n):
"""
G: iGraph graph with annotations
func: string containing function name
n: int, recursion level
Computes the given function recursively on each vertex
Current precondition: already have run the computation for G, func, n-1.
"""
return np.matrix(recursive_feature_array(G,f,n))
def recursive_feature_array(G, func, n):
"""
Computes recursive features of the graph G for the provided function of G, returning
the matrix representing the nth level of the recursion.
"""
attr_name = "_rolx_" + func.__name__ + "_" + str(n)
if attr_name in G.vs.attributes():
result = np.array(G.vs[attr_name])
return result
if n==0:
stats = func(G)
result = np.array([[x] for x in stats])
result = result * 1.0
G.vs[attr_name] = result
return result
prev_stats = recursive_feature_array(G, func, n-1)
all_neighbor_stats = []
for v in G.vs:
neighbors = G.neighbors(v)
degree = len(neighbors)
if degree == 0:
neighbor_avgs = neighbor_sums = np.zeros(prev_stats[0].size)
else:
prev_neighbor_stats = [prev_stats[x] for x in neighbors]
neighbor_sums_vec = sum(prev_neighbor_stats)
neighbor_avgs_vec = neighbor_sums_vec / degree
v_stats = np.concatenate((neighbor_sums_vec, neighbor_avgs_vec), axis=0)
all_neighbor_stats.append(v_stats)
G.vs[attr_name] = all_neighbor_stats
return all_neighbor_stats
def approx_linear_solution(w, A, threshold=1e-15):
'''
Checks if w is linearly dependent on the columns of A, this is done by solving the least squares problem (LSP)
min || w - Ax ||_2^2
x
and checking if || w - Ax_star || <= threshold, where x_star is the arg_minimizer of the LSP
w: column vector
A: matrix
threshold: int
'''
x0 = np.zeros(A.shape[1])
x_star, residuals, rank, s = lstsq(A, w)
norm_residual = norm(residuals)
result = True if norm_residual <= threshold else False
return (result, norm_residual, x_star)
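# Illustrative check (not part of the original module): a vector that is an exact
# linear combination of A's columns is flagged as (approximately) dependent and
# the recovered coefficients match the combination used to build it.
def _demo_approx_linear_solution():
    A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    w = 2.0 * A[:, 0] + 3.0 * A[:, 1]
    is_dependent, residual, coeffs = approx_linear_solution(w, A, threshold=1e-10)
    print(is_dependent, coeffs)   # True, approximately [2., 3.]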
def degree(G):
""" Auxiliary function to calculate the degree of each element of G. """
return G.degree()
def vertex_egonet(G, v):
""" Computes the number of edges in the ego network of the vertex v. """
ego_network = G.induced_subgraph(G.neighborhood(v))
ego_edges = ego_network.ecount()
return ego_edges
def egonet(G):
""" Computes the ego network for all vertices v in G. """
return [vertex_egonet(G, v) for v in G.vs]
def vertex_egonet_out(G, v):
""" Computes the outgoing edges from the ego network of the vertex v in G. """
neighbors = G.neighborhood(v)
ego_network = G.induced_subgraph(neighbors)
ego_edges = ego_network.ecount()
degree_sum = sum([G.degree(v) for v in neighbors])
out_edges = degree_sum - 2*ego_edges #Summing over degree will doublecount every edge within the ego network
return out_edges
def egonet_out(G):
""" Computes the number of outgoing ego network edges for every vertex in G. """
return [vertex_egonet_out(G, v) for v in G.vs]
def vertex_features(g):
"""
Constructs a vertex feature matrix using recursive feature generation, then uses least-squares solving
to eliminate those exhibiting approximate linear dependence.
"""
G = g.copy()
num_rows = G.vcount()
features = [degree, egonet, egonet_out]
V = np.matrix(np.zeros((num_rows, 16*len(features))))
next_feature_col = 0
for feature in features:
base = recursive_feature(G, feature, 0)
base = base/norm(base)
V = add_col(V, base, next_feature_col)
next_feature_col += 1
level = 1
accepted_features = True
while accepted_features:
accepted_features = False
feature_matrix = recursive_feature(G, feature, level)
rows, cols = feature_matrix.shape
for i in range(cols):
b = feature_matrix[:,i]
b = b/norm(b)
mat = V[:,:next_feature_col]
threshold = 10.0**(-15+level)
(is_approx_soln, _, _) = approx_linear_solution(b, mat, threshold)
if not is_approx_soln:
V = add_col(V, b, next_feature_col)
next_feature_col += 1
accepted_features = True
level += 1
return V[:, :next_feature_col]
def add_col(V, b, insert_col):
""" Add the given column b to the matrix V, enlarging the matrix if necessary. """
rows, cols = V.shape
if insert_col == cols: # need to resize V
zeros = np.matrix(np.zeros((rows, 1)))
V = np.concatenate((V, zeros), axis=1)
V[:, insert_col] = b
return V
def kmeans_quantize(M, bits):
""" Performs k-means quantization on the given matrix. Returns the encoded matrix and the number of bits needed for encoding it. """
k = 2**bits
obs = np.asarray(M).reshape(-1)
centroid, label = kmeans2(obs, k)
enc_M = [centroid[v] for v in label]
enc_M = np.matrix(enc_M).reshape(M.shape)
return enc_M, (bits * enc_M.size)
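# Illustrative sketch (not part of the original module): quantize a small random
# matrix to 2 bits (4 k-means levels); the returned cost is bits * number of entries.
def _demo_kmeans_quantize():
    M = np.matrix(np.random.rand(6, 4))
    enc_M, cost = kmeans_quantize(M, bits=2)
    print(enc_M.shape, cost)   # (6, 4), 48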
def kl_divergence(A,B):
""" Computes the Kullback-Leibler divergence of the two matrices A and B. """
    a = np.asarray(A, dtype=float)
    b = np.asarray(B, dtype=float)
return np.sum(np.where(a != 0, a * np.log(a / b), 0))
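# Small check (not part of the original module): the divergence of a matrix with
# itself is 0, and it grows as the second argument moves away from the first.
def _demo_kl_divergence():
    A = np.array([[0.2, 0.3], [0.1, 0.4]])
    B = np.array([[0.25, 0.25], [0.25, 0.25]])
    print(kl_divergence(A, A), kl_divergence(A, B))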
def description_length(V, fctr_res, bits=10):
""" Computes the length necessary to describe the given model with the given number of bits. """
W = fctr_res[0]
H = fctr_res[1]
enc_W, enc_W_cost = kmeans_quantize(W, bits)
enc_H, enc_H_cost = kmeans_quantize(H, bits)
enc_cost = enc_W_cost + enc_H_cost
err_cost = kl_divergence(V, enc_W*enc_H)
return enc_W, enc_H, enc_cost, err_cost
def standardize_rows(M):
""" Distribute the rows of the cost matrix normally to allow for accurate comparisons of error and description
cost. """
rv = np.matrix(M)
for i in range(rv.shape[0]):
mean = np.mean(M[i, :])
stdev = np.std(M[i, :])
rv[i, :]= (M[i, :]- mean)/stdev
return rv
# def standardize(M):
# m_flat = np.asarray(M).reshape(-1)
# mean = np.mean(m_flat)
# stdev = np.std(m_flat)
# m_flat = (m_flat - mean)/stdev
#
# return m_flat.reshape(M.shape)
def get_factorization(V, num_roles):
""" Obtains a nonnegative matrix factorization of the matrix V with num_roles intermediate roles. """
model = NMF(n_components=num_roles, init='random', random_state=0)
model.fit(V)
node_roles = model.transform(V)
role_features = model.components_
return torch.from_numpy(node_roles), torch.from_numpy(role_features)
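# Minimal sketch (not part of the original module): factorize a random non-negative
# feature matrix into 3 roles; the outputs are torch tensors with shapes
# (n_nodes, n_roles) and (n_roles, n_features).
def _demo_get_factorization():
    V = np.abs(np.random.rand(50, 8))
    node_roles, role_features = get_factorization(V, 3)
    print(node_roles.shape, role_features.shape)   # torch.Size([50, 3]), torch.Size([3, 8])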
def get_optimal_factorization(V, min_roles=2, max_roles=6, min_bits=1, max_bits=10):
""" Uses grid search to find the optimal parameter number and encoding of the given matrix factorization. """
max_roles = min(max_roles, V.shape[1]) # Can't have more possible roles than features
num_role_options = max_roles - min_roles
num_bit_options = max_bits - min_bits
mat_enc_cost = np.zeros((num_role_options, num_bit_options))
mat_err_cost = np.zeros((num_role_options, num_bit_options))
mat_fctr_res = [[0] * num_bit_options] * num_role_options
# Setup and run the factorization problem
for i in range(num_role_options):
rank = min_roles + i
fctr_res = get_factorization(V, rank)
for j in range(num_bit_options):
bits = min_bits + j
enc_W, enc_H, enc_cost, err_cost = description_length(V, fctr_res, bits)
mat_enc_cost[i,j] = enc_cost
mat_err_cost[i,j] = err_cost
mat_fctr_res[i][j] = (enc_W, enc_H)
mat_std_enc_cost = standardize_rows(mat_enc_cost)
mat_std_err_cost = standardize_rows(mat_err_cost)
mat_total_cost = mat_enc_cost + mat_err_cost
mat_total_std_cost = mat_std_enc_cost + mat_std_err_cost
    # print(mat_total_cost)
    min_idx = mat_total_std_cost.argmin()
    min_coord = np.unravel_index(min_idx, mat_total_std_cost.shape)
    print('min cost @', min_idx, ' or at ', min_coord)
print("rank, bits, enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, std_total_cost")
for i in range(num_role_options):
for j in range(num_bit_options):
rank = min_roles + i
bits = min_bits + j
enc_cost = mat_enc_cost[i,j]
err_cost = mat_err_cost[i,j]
std_enc_cost = mat_std_enc_cost[i,j]
std_err_cost = mat_std_err_cost[i,j]
total_cost = mat_total_cost[i,j]
total_std_cost = mat_total_std_cost[i,j]
print("%s, %s, (%s, %s, %s), (%s, %s, %s)" % (rank, bits,
enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, total_std_cost))
min_idx = mat_total_std_cost.argmin()
min_coord = np.unravel_index(min_idx, mat_total_std_cost.shape)
min_role_index, min_bit_index = min_coord
min_role_value = min_role_index + min_roles
min_bit_value = min_bit_index + min_bits
min_std_enc_cost = mat_std_enc_cost[min_coord]
min_std_err_cost = mat_std_err_cost[min_coord]
min_total_std_cost = mat_total_std_cost[min_coord]
print("%s, %s, (%s, %s, %s)" % (min_role_value, min_bit_value, min_std_enc_cost, min_std_err_cost, min_total_std_cost))
return mat_fctr_res[min_role_index][min_bit_index]
def make_sense(G, H):
""" Given graph G and node-role matrix H, returns a role-feature matrix K for sensemaking analyses of roles. """
features = [ 'betweenness', 'closeness', 'degree', 'diversity', 'eccentricity', 'pagerank', 'personalized_pagerank', 'strength' ]
feature_fns = [ getattr(G, f) for f in features ]
feature_matrix = [ func() for func in feature_fns ]
feature_matrix = np.matrix(feature_matrix).transpose()
#print(feature_matrix)
M = feature_matrix
for i in range(M.shape[1]):
M[:,i] = M[:,i] / norm(M[:,i])
K = complete_factor(H, M, h_on_left=True)
#print(K)
return K
def sense_residual_left_factor(W, H, M):
W = np.matrix(W).reshape((M.shape[0], H.shape[0]))
return norm(M - W*H)
def sense_residual_right_factor(K, H, M):
K = np.matrix(K).reshape((H.shape[1], M.shape[1]))
# print(M.shape,H.shape,K.shape)
return norm(M - H*K)
def complete_factor(H, M, h_on_left=True):
"""Given nonnegative matrix M and a nonnegative factor H of M, finds the other (nonnegative) factor of M.
H: known factor of matrix M.
M: product matrix.
h_on_left: boolean, true if H is the left factor of M, false if H is the right factor.
If H is left factor, find the matrix K such that HK=M. If H is the right factor, finds W such that WH=M
Result is an appropriately-sized matrix. """
if h_on_left:
shape = (H.shape[1], M.shape[1])
residual = sense_residual_right_factor
else:
shape = (M.shape[0], H.shape[0])
residual = sense_residual_left_factor
size = shape[0] * shape[1]
    guess = np.random.rand(size)
#!/usr/bin/env python
# Started: Jan 2015 (KDG)
# Revised to read in all the data from files: Mar 2016 (KDG)
# observed data defines the wavelength grids to fit on
"""
ObsData class
observed data that will be used to constrain the dust model
"""
from __future__ import print_function
import numpy as np
from astropy.table import Table
from astropy.io import fits
__all__ = ["ObsData"]
# Object for the observed dust data
class ObsData():
"""
ObsData Class
Parameters
----------
ext_filenames: list of 'string'
filenames with the observed extincction curve
avnhi_filenames: list of 'string'
filename with the observed A(V)/N(HI) value + unc
abund_filename: 'string'
filename with the observed atomic abundances
ir_emis_filename: 'string'
filename with the observed infrared dust emission
dust_scat_filename: 'string'
filename with the observed dust scattering (a, g) parameters
[currently not used - hard coded for MW diffuse - need to change]
ext_tags : list of 'string'
list of tags identifying the origin of the
dust extinction curve segments
Attributes
----------
alnhi : float
A(lamda)/N(HI) value for extinction curve
alnhi_unc : float
uncertainty in A(lamda)/N(HI) value for extinction curve
ext_waves : 'numpy.ndarray'
wavelengths for the extinction curve
ext_alav : 'numpy.ndarray'
extinction curve in A(lambda)/A(V) units
ext_alav_unc : 'numpy.ndarray'
extinction curve uncertainties in A(lambda)/A(V) units
ext_alnhi : 'numpy.ndarray'
extinction curve in A(lambda)/N(HI) units
ext_alnhi_unc : 'numpy.ndarray'
extinction curve uncertainties in A(lambda)/N(HI) units
ext_tags : 'numpy.ndarray'
string tags identifying the origin of the extinction curve measurement
"""
# read in the data from files
def __init__(self, ext_filenames, avnhi_filename,
abund_filename, ir_emis_filename,
dust_scat_filename, ext_tags=None,
scat_path="./"):
# extinction curve
self.fit_extinction = True
self.ext_waves = np.empty((0))
self.ext_alav = np.empty((0))
self.ext_alav_unc = np.empty((0))
self.ext_tags = []
if isinstance(ext_filenames, (list, tuple)):
for i, filename in enumerate(ext_filenames):
t = Table.read(filename, format='ascii.commented_header')
self.ext_waves = np.concatenate([self.ext_waves,
1.0/t['wave']])
self.ext_alav = np.concatenate([self.ext_alav,
t['A(l)/A(V)']])
self.ext_alav_unc = np.concatenate([self.ext_alav_unc,
t['unc']])
if ext_tags is not None:
cur_tag = ext_tags[i]
else:
cur_tag = 'Tag' + str(i+1)
self.ext_tags = self.ext_tags + len(t['wave'])*[cur_tag]
else:
# assume it is a FITS file (need to add checks)
hdulist = fits.open(ext_filenames)
for i in range(1, len(hdulist)):
t = hdulist[i].data
# hack to get AzV 215 to work
# need to get a better file format for FITS extinction curves
# units, etc.
trv = 3.65
ext = (t['EXT']/trv) + 1
ext_unc = t['UNC']/trv
# only keep positive measurements
gindxs, = np.where(ext > 0.0)
self.ext_waves = np.concatenate([self.ext_waves,
t['WAVELENGTH'][gindxs]])
self.ext_alav = np.concatenate([self.ext_alav, ext[gindxs]])
self.ext_alav_unc = np.concatenate([self.ext_alav_unc,
ext_unc[gindxs]])
self.ext_tags = self.ext_tags + \
len(t['WAVELENGTH'])*[hdulist[i].header['EXTNAME']]
hdulist.close()
# sort
sindxs = np.argsort(self.ext_waves)
self.ext_waves = self.ext_waves[sindxs]
self.ext_alav = self.ext_alav[sindxs]
self.ext_alav_unc = self.ext_alav_unc[sindxs]
self.ext_tags = np.array(self.ext_tags)[sindxs]
# normalization from A(V) to N(HI)
t = Table.read(avnhi_filename,
format='ascii.commented_header',
header_start=-1)
self.avnhi = t['Av_to_NHI'][0]
self.avnhi_unc = t['unc'][0]
# change the extinction normalization from A(V) to N(HI)
self.ext_alnhi = self.ext_alav*self.avnhi
self.ext_alnhi_unc = (np.square(self.ext_alav_unc/self.ext_alav)
+ np.square(self.avnhi_unc/self.avnhi))
self.ext_alnhi_unc = self.ext_alnhi*np.sqrt(self.ext_alnhi_unc)
# dust abundances
self.fit_abundance = False
if abund_filename is not None:
self.fit_abundance = True
t = Table.read(abund_filename, format='ascii.commented_header')
self.abundance = {}
self.total_abundance = {}
for i in range(len(t)):
self.abundance[t['atom'][i]] = (t['abund'][i],
t['abund_unc'][i])
self.total_abundance[t['atom'][i]] = (t['total_abund'][i],
t['total_abund_unc'][i])
# diffuse IR emission spectrum
self.fit_ir_emission = False
if ir_emis_filename is not None:
self.fit_ir_emission = True
t = Table.read(ir_emis_filename, format='ascii.commented_header')
self.ir_emission_waves = np.array(t['WAVE'])
self.ir_emission = np.array(t['SPEC'])/1e20
self.ir_emission_unc = np.array(t['ERROR'])/1e20
# check if any uncs are zero
gindxs, = np.where(self.ir_emission_unc == 0.0)
if len(gindxs) > 0:
self.ir_emission_unc[gindxs] = 0.1*self.ir_emission[gindxs]
# sort
sindxs = np.argsort(self.ir_emission_waves)
self.ir_emission_waves = self.ir_emission_waves[sindxs]
self.ir_emission = self.ir_emission[sindxs]
self.ir_emission_unc = self.ir_emission_unc[sindxs]
# dust albedo (Gordon et al. AoD proceedings)
self.fit_scat_a = False
self.fit_scat_g = False
if dust_scat_filename is not None:
self.fit_scat_a = True
self.fit_scat_g = True
files_dgl = ["mathis73", "morgan76", "lillie76", "toller81",
"murthy93", "murthy95", "petersohn97", "witt97",
"schiminovich01", "shalima04", "sujatha05",
"sujatha07", "sujatha10"]
scat_waves = []
scat_albedo = []
scat_albedo_unc = []
scat_g = []
scat_g_unc = []
scat_ref = []
for sfile in files_dgl:
f = open(scat_path + sfile + '.dat', 'r')
ref = f.readline().rstrip()
f.close()
t = Table.read(scat_path+sfile+'.dat',
format='ascii',
header_start=1)
for k in range(len(t)):
scat_waves.append(t['wave,'][k])
scat_albedo.append(t['albedo,'][k])
scat_albedo_unc.append(t['delta,'][k])
scat_g.append(t['g,'][k])
scat_g_unc.append(t['delta'][k])
scat_ref.append(ref)
# remove all the measurements with zero uncertainty
            gindxs, = np.where(np.array(scat_albedo_unc) > 0.0)
import cv2
import numpy as np
def find_color(roi, colors, threshold=0.0):
"""
Generates a percentage of each jersey color in the roi. If at least one is
above threshold, returns the color with the highest percentage, otherwise
returns None.
roi: image or region of interest within image
colors: list of colors as strings in order [Team1, Team2, ref or other]
threshold: minimum fraction of image containing either of the colors necessary to be detected
"""
color_dict = {'white': [[80, 0, 100], [180, 100, 255]],
'red': [[170, 50, 50], [180, 255, 255], [0, 50, 50], [10, 255, 255]],
'yellow': [[20, 50, 50], [30, 255, 255]],
'green': [[50, 50, 50], [80, 255, 255]],
'blue': [[81, 50, 50], [125, 255, 255]],
'light blue': [[90, 100, 50], [125, 255, 180]],
'dark blue': [[106, 50, 50], [125, 255, 255]],
'purple': [[126, 50, 50], [140, 255, 255]],
'pink': [[141, 50, 50], [169, 255, 255]],
'orange': [[8, 50, 50], [40, 255, 255]],
'black': [[0, 0, 0], [180, 40, 20]],
'grey': [[0, 0, 21], [180, 25, 160]]}
roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
total_pixels = roi.shape[0] * roi.shape[1]
highest_color = None
highest_perc = 0
for color in colors:
if color in color_dict.keys():
# red is on the edge of HSV range, so needs a combination of two masks
if color == 'red':
lower1 = np.array(color_dict[color][0])
upper1 = np.array(color_dict[color][1])
                lower2 = np.array(color_dict[color][2])
# For each point cloud in the dataset, build the trees and run the queries in batch
# (both kdtree and octree), and benchmark their running times
import random
import math
import numpy as np
import time
import os
import struct
from scipy.spatial import KDTree
import octree as octree
import kdtree as kdtree
from result_set import KNNResultSet, RadiusNNResultSet
np.seterr(all='raise')
def read_velodyne_bin(path):
'''
:param path:
:return: homography matrix of the point cloud, N*3
'''
pc_list = []
with open(path, 'rb') as f:
content = f.read()
pc_iter = struct.iter_unpack('ffff', content)
for idx, point in enumerate(pc_iter):
pc_list.append([point[0], point[1], point[2]])
return np.asarray(pc_list, dtype=np.float32)
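# Illustrative usage sketch: the KITTI-style .bin layout is four float32 values
# (x, y, z, reflectance) per point; the path below is hypothetical.
def _demo_read_velodyne_bin():
    points = read_velodyne_bin('./data/000000.bin')   # hypothetical file
    print(points.shape, points.dtype)                 # (N, 3), float32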
def main():
# configuration
leaf_size = 32
min_extent = 0.0001
k = 8
radius = 1
    # root_dir = '/Users/renqian/cloud_lesson/kitti' # dataset path
    root_dir = './data' # dataset path
cat = os.listdir(root_dir)
iteration_num = len(cat)
print("scipy ---------------")
construction_time_sum = 0
knn_time_sum = 0
radius_time_sum = 0
brute_time_sum = 0
for i in range(iteration_num):
filename = os.path.join(root_dir, cat[i])
db_np = read_velodyne_bin(filename)
begin_t = time.time()
root = KDTree(db_np, leaf_size)
construction_time_sum += time.time() - begin_t
begin_t = time.time()
query = db_np[0, :]
result_set = KNNResultSet(capacity=k)
distance, indices = root.query(x=query, k=k)
output = ''
for i, item in enumerate(zip(indices, distance)):
output += '%d - %.2f\n' % (item[0], item[1])
# print(output)
knn_time_sum += time.time() - begin_t
begin_t = time.time()
indices = root.query_ball_point(query, radius)
output = ''
for i, index in enumerate(indices):
output += '%d - %.2f\n' % (index, np.linalg.norm(db_np[index] - query))
# print(output)
radius_time_sum += time.time() - begin_t
begin_t = time.time()
diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
nn_idx = np.argsort(diff)
nn_dist = diff[nn_idx]
brute_time_sum += time.time() - begin_t
print("Scipy: build %.3f, knn %.3f, radius %.3f, brute %.3f" % (construction_time_sum*1000/iteration_num,
knn_time_sum*1000/iteration_num,
radius_time_sum*1000/iteration_num,
brute_time_sum*1000/iteration_num))
sci_knn = knn_time_sum
sci_radius = radius_time_sum
sci_brute = brute_time_sum
print("octree --------------")
construction_time_sum = 0
knn_time_sum = 0
radius_time_sum = 0
brute_time_sum = 0
for i in range(iteration_num):
filename = os.path.join(root_dir, cat[i])
db_np = read_velodyne_bin(filename)
begin_t = time.time()
root = octree.octree_construction(db_np, leaf_size, min_extent)
construction_time_sum += time.time() - begin_t
query = db_np[0,:]
begin_t = time.time()
result_set = KNNResultSet(capacity=k)
octree.octree_knn_search(root, db_np, result_set, query)
# print(result_set)
knn_time_sum += time.time() - begin_t
begin_t = time.time()
result_set = RadiusNNResultSet(radius=radius)
octree.octree_radius_search_fast(root, db_np, result_set, query)
# print(result_set)
radius_time_sum += time.time() - begin_t
begin_t = time.time()
diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
nn_idx = np.argsort(diff)
nn_dist = diff[nn_idx]
brute_time_sum += time.time() - begin_t
print("Octree: build %.3f, knn %.3f, radius %.3f, brute %.3f" % (construction_time_sum*1000/iteration_num,
knn_time_sum*1000/iteration_num,
radius_time_sum*1000/iteration_num,
brute_time_sum*1000/iteration_num))
oct_knn = knn_time_sum
oct_radius = radius_time_sum
oct_brute = brute_time_sum
print("kdtree1 --------------")
construction_time_sum = 0
knn_time_sum = 0
radius_time_sum = 0
brute_time_sum = 0
for i in range(iteration_num):
filename = os.path.join(root_dir, cat[i])
db_np = read_velodyne_bin(filename)
begin_t = time.time()
root = kdtree.kdtree_construction(db_np, leaf_size)
construction_time_sum += time.time() - begin_t
query = db_np[0,:]
begin_t = time.time()
result_set = KNNResultSet(capacity=k)
kdtree.kdtree_knn_search(root, db_np, result_set, query)
# print(result_set)
knn_time_sum += time.time() - begin_t
begin_t = time.time()
result_set = RadiusNNResultSet(radius=radius)
kdtree.kdtree_radius_search(root, db_np, result_set, query)
# print(result_set)
radius_time_sum += time.time() - begin_t
begin_t = time.time()
diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
        nn_idx = np.argsort(diff)
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.spatial.transform import Rotation
from tadataka.matrix import motion_matrix
from tadataka.rigid_transform import (inv_transform_all, transform_all,
transform_each, Transform, transform_se3)
def test_transform_each():
points = np.array([
[1, 2, 5],
[4, -2, 3],
])
rotations = np.array([
[[1, 0, 0],
[0, 0, -1],
[0, 1, 0]],
[[0, 0, -1],
[0, 1, 0],
[1, 0, 0]]
])
translations = np.array([
[1, 2, 3],
[4, 5, 6]
])
expected = np.array([
[2, -3, 5], # [ 1, -5, 2] + [ 1, 2, 3]
[1, 3, 10] # [ -3, -2, 4] + [ 4, 5, 6]
])
assert_array_equal(
transform_each(rotations, translations, points),
expected
)
def test_transform_all():
points = np.array([
[1, 2, 5],
[4, -2, 3],
[0, 0, 6]
])
rotations = np.array([
[[1, 0, 0],
[0, 0, -1],
[0, 1, 0]],
[[0, 0, -1],
[0, 1, 0],
[1, 0, 0]]
])
translations = np.array([
[1, 2, 3],
[4, 5, 6]
])
expected = np.array([
[[2, -3, 5], # [ 1, -5, 2] + [ 1, 2, 3]
[5, -1, 1], # [ 4, -3, -2] + [ 1, 2, 3]
[1, -4, 3]], # [ 0, -6, 0] + [ 1, 2, 3]
[[-1, 7, 7], # [-5, 2, 1] + [ 4, 5, 6]
[1, 3, 10], # [-3, -2, 4] + [ 4, 5, 6]
[-2, 5, 6]] # [-6, 0, 0] + [ 4, 5, 6]
])
assert_array_equal(transform_all(rotations, translations, points),
expected)
def test_inv_transform_all():
points = np.array([
[1, 2, 5],
[4, -2, 3],
[0, 0, 6]
])
rotations = np.array([
[[1, 0, 0],
[0, 0, -1],
[0, 1, 0]],
[[0, 0, -1],
[0, 1, 0],
[1, 0, 0]]
])
# [R.T for R in rotations]
# [[1, 0, 0],
# [0, 0, 1],
# [0, -1, 0]],
# [[0, 0, 1],
# [0, 1, 0],
# [-1, 0, 0]]
translations = np.array([
[1, 2, 3],
[4, 5, 6]
])
# p - t
# [[0, 0, 2],
# [3, -4, 0],
# [-1, -2, 3]],
# [[-3, -3, -1],
# [0, -7, -3],
# [-4, -5, 0]]
# np.dot(R.T, p-t)
expected = np.array([
[[0, 2, 0],
[3, 0, 4],
[-1, 3, 2]],
[[-1, -3, 3],
[-3, -7, 0],
[0, -5, 4]]
])
assert_array_equal(inv_transform_all(rotations, translations, points),
expected)
def test_transform_class():
P = np.array([
[1, 2, 5],
[4, -2, 3],
])
R = np.array([
[1, 0, 0],
[0, 0, -1],
[0, 1, 0]
])
t = np.array([1, 2, 3])
assert_array_equal(
Transform(R, t, s=1.0)(P),
[[2, -3, 5], # [ 1 -5 2] + [ 1 2 3]
[5, -1, 1]] # [ 4 -3 -2] + [ 1 2 3]
)
assert_array_equal(
Transform(R, t, s=0.1)(P),
[[1.1, 1.5, 3.2], # [ 0.1 -0.5 0.2] + [ 1 2 3]
[1.4, 1.7, 2.8]] # [ 0.4 -0.3 -0.2] + [ 1 2 3]
)
def test_transform_se3():
    R_10 = np.random.random((3, 3))
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from ..stats import mad, summary_plot
from .hrv_utils import _hrv_get_rri, _hrv_sanitize_input
def hrv_time(peaks, sampling_rate=1000, show=False, **kwargs):
"""Computes time-domain indices of Heart Rate Variability (HRV).
See references for details.
Parameters
----------
peaks : dict
Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
Can be a list of indices or the output(s) of other functions such as ecg_peaks,
ppg_peaks, ecg_process or bio_process.
sampling_rate : int, optional
Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur. Should be at
least twice as high as the highest frequency in vhf. By default 1000.
show : bool
If True, will plot the distribution of R-R intervals.
Returns
-------
DataFrame
Contains time domain HRV metrics:
- **MeanNN**: The mean of the RR intervals.
- **SDNN**: The standard deviation of the RR intervals.
    - **SDANN1**, **SDANN2**, **SDANN5**: The standard deviation of average RR intervals extracted from n-minute segments of
time series data (1, 2 and 5 by default). Note that these indices require a minimal duration of signal to be computed
(3, 6 and 15 minutes respectively) and will be silently skipped if the data provided is too short.
    - **SDNNI1**, **SDNNI2**, **SDNNI5**: The mean of the standard deviations of RR intervals extracted from n-minute
segments of time series data (1, 2 and 5 by default). Note that these indices require a minimal duration of signal to
be computed (3, 6 and 15 minutes respectively) and will be silently skipped if the data provided is too short.
- **RMSSD**: The square root of the mean of the sum of successive differences between
adjacent RR intervals. It is equivalent (although on another scale) to SD1, and
therefore it is redundant to report correlations with both (Ciccone, 2017).
- **SDSD**: The standard deviation of the successive differences between RR intervals.
- **CVNN**: The standard deviation of the RR intervals (SDNN) divided by the mean of the RR
intervals (MeanNN).
- **CVSD**: The root mean square of the sum of successive differences (RMSSD) divided by the
mean of the RR intervals (MeanNN).
    - **MedianNN**: The median of the RR intervals.
    - **MadNN**: The median absolute deviation of the RR intervals.
    - **MCVNN**: The median absolute deviation of the RR intervals (MadNN) divided by the median
    of the RR intervals (MedianNN).
- **IQRNN**: The interquartile range (IQR) of the RR intervals.
- **pNN50**: The proportion of RR intervals greater than 50ms, out of the total number of RR intervals.
- **pNN20**: The proportion of RR intervals greater than 20ms, out of the total number of RR intervals.
- **TINN**: A geometrical parameter of the HRV, or more specifically, the baseline width of
the RR intervals distribution obtained by triangular interpolation, where the error of least
squares determines the triangle. It is an approximation of the RR interval distribution.
    - **HTI**: The HRV triangular index, measuring the total number of RR intervals divided by the
height of the RR intervals histogram.
See Also
--------
ecg_peaks, ppg_peaks, hrv_frequency, hrv_summary, hrv_nonlinear
Examples
--------
>>> import neurokit2 as nk
>>>
>>> # Download data
>>> data = nk.data("bio_resting_5min_100hz")
>>>
>>> # Find peaks
>>> peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
>>>
>>> # Compute HRV indices
>>> hrv = nk.hrv_time(peaks, sampling_rate=100, show=True)
References
----------
- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
(2017). Reminder: RMSSD and SD1 are identical heart rate variability metrics. Muscle & nerve,
56(4), 674-678.
- <NAME>. (2002). Assessing heart rate variability from real-world Holter reports. Cardiac
electrophysiology review, 6(3), 239-244.
- <NAME>., & <NAME>. (2017). An overview of heart rate variability metrics and norms.
Frontiers in public health, 5, 258.
"""
# Sanitize input
peaks = _hrv_sanitize_input(peaks)
if isinstance(peaks, tuple): # Detect actual sampling rate
peaks, sampling_rate = peaks[0], peaks[1]
# Compute R-R intervals (also referred to as NN) in milliseconds
rri = _hrv_get_rri(peaks, sampling_rate=sampling_rate, interpolate=False)
diff_rri = np.diff(rri)
out = {} # Initialize empty container for results
# Deviation-based
out["MeanNN"] = np.nanmean(rri)
out["SDNN"] = np.nanstd(rri, ddof=1)
for i in [1, 2, 5]:
out["SDANN" + str(i)] = _sdann(rri, sampling_rate, window=i)
out["SDNNI" + str(i)] = _sdnni(rri, sampling_rate, window=i)
# Difference-based
out["RMSSD"] = np.sqrt(np.mean(diff_rri ** 2))
out["SDSD"] = np.nanstd(diff_rri, ddof=1)
# Normalized
out["CVNN"] = out["SDNN"] / out["MeanNN"]
out["CVSD"] = out["RMSSD"] / out["MeanNN"]
# Robust
out["MedianNN"] = np.nanmedian(rri)
out["MadNN"] = mad(rri)
out["MCVNN"] = out["MadNN"] / out["MedianNN"] # Normalized
out["IQRNN"] = scipy.stats.iqr(rri)
# Extreme-based
nn50 = np.sum(np.abs(diff_rri) > 50)
nn20 = np.sum(np.abs(diff_rri) > 20)
out["pNN50"] = nn50 / len(rri) * 100
out["pNN20"] = nn20 / len(rri) * 100
# Geometrical domain
if "binsize" in kwargs:
binsize = kwargs["binsize"]
else:
binsize = (1 / 128) * 1000
bins = np.arange(0, np.max(rri) + binsize, binsize)
bar_y, bar_x = np.histogram(rri, bins=bins)
# HRV Triangular Index
out["HTI"] = len(rri) / np.max(bar_y)
# Triangular Interpolation of the NN Interval Histogram
out["TINN"] = _hrv_TINN(rri, bar_x, bar_y, binsize)
if show:
_hrv_time_show(rri, **kwargs)
out = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("HRV_")
return out
# =============================================================================
# Utilities
# =============================================================================
def _hrv_time_show(rri, **kwargs):
fig = summary_plot(rri, **kwargs)
plt.xlabel("R-R intervals (ms)")
fig.suptitle("Distribution of R-R intervals")
return fig
def _sdann(rri, sampling_rate, window=1):
window_size = window * 60 * 1000 # Convert window in min to ms
n_windows = int(np.round(np.cumsum(rri)[-1] / window_size))
if n_windows < 3:
return np.nan
rri_cumsum = np.cumsum(rri)
avg_rri = []
for i in range(n_windows):
start = i * window_size
start_idx = np.where(rri_cumsum >= start)[0][0]
end_idx = np.where(rri_cumsum < start + window_size)[0][-1]
avg_rri.append(np.mean(rri[start_idx:end_idx]))
    sdann = np.nanstd(avg_rri, ddof=1)
    return sdann
import argparse
import pandas as pd
import os
import sys
import numpy as np
import torch
from utils import computeMetricsAlt, evalThresholdAlt
from ModelShokri import DataHandler, TrainWBAttacker
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser(description='Analyse criteria obtained from different MIAs.')
parser.add_argument('--model_type', type=str, help='Model Architecture to attack.')
parser.add_argument('--num_iters', type=int, default=20, help='Number of iterations for empirical estimation.')
parser.add_argument('--working_dir', type=str, default='./', help='Where to collect and store data.')
exp_parameters = parser.parse_args()
currdir = exp_parameters.working_dir
num_runs_for_random = exp_parameters.num_iters
model_type = exp_parameters.model_type
# Extracting intermediate outputs and gradients of the model
InterOuts_Grads0 = np.load(currdir + '/RawResults/NasrTrain0_' + model_type + '.npz')
InterOuts_Grads1 = np.load(currdir + '/RawResults/NasrTrain1_' + model_type + '.npz')
AdditionalInfo = np.load(currdir + '/RawResults/NasrAddInfo_' + model_type + '.npz')
inter_outs0 = []
inter_outs1 = []
out_size_list = AdditionalInfo['arr_0']
layer_size_list = AdditionalInfo['arr_1']
kernel_size_list = AdditionalInfo['arr_2']
n_inter_outputs = len(out_size_list)
n_layer_grads = len(kernel_size_list)
for i in range(n_inter_outputs):
inter_outs0.append(InterOuts_Grads0['arr_' + str(i)])
inter_outs1.append(InterOuts_Grads1['arr_' + str(i)])
lossval0 = InterOuts_Grads0['arr_' + str(n_inter_outputs)]
lossval1 = InterOuts_Grads1['arr_' + str(n_inter_outputs)]
labels1hot0 = InterOuts_Grads0['arr_' + str(n_inter_outputs + 1)]
labels1hot1 = InterOuts_Grads1['arr_' + str(n_inter_outputs + 1)]
grad_vals0 = []
grad_vals1 = []
for i in range(n_inter_outputs + 2, n_inter_outputs + 2 + n_layer_grads, 1):
grad_vals0.append(InterOuts_Grads0['arr_' + str(i)])
grad_vals1.append(InterOuts_Grads1['arr_' + str(i)])
# Our Analysis
FPR = np.linspace(0, 1, num=1001)
try:
dfMetricsBalanced = pd.read_csv(currdir + '/CompleteResults/BalancedMetrics_' + model_type + '.csv')
dfTPRBalanced = pd.read_csv(currdir + '/CompleteResults/BalancedROC_' + model_type + '.csv')
except FileNotFoundError:
dfMetricsBalanced = pd.DataFrame(columns=['Attack Strategy',
'AUROC', 'AUROC STD',
'Best Accuracy', 'Best Accuracy STD',
'FPR at TPR80', 'FPR at TPR80 STD',
'FPR at TPR85', 'FPR at TPR85 STD',
'FPR at TPR90', 'FPR at TPR90 STD',
'FPR at TPR95', 'FPR at TPR95 STD'])
dfTPRBalanced = pd.DataFrame(FPR, columns=['FPR'])
aux_list_metrics = []
aux_list_TPR = []
for k in range(num_runs_for_random):
np.random.seed(k)
indx_train0 = np.random.choice(lossval0.shape[0], size=4000, replace=False)
indx_train1 = np.random.choice(lossval1.shape[0], size=4000, replace=False)
indx_test0 = np.setdiff1d(np.arange(lossval0.shape[0]), indx_train0)
indx_test0 = np.random.choice(indx_test0, size=6000, replace=False)
indx_test1 = np.setdiff1d(np.arange(lossval1.shape[0]), indx_train1)
indx_test1 = np.random.choice(indx_test1, size=6000, replace=False)
trainingData = DataHandler(inter_outs0, inter_outs1, lossval0, lossval1, labels1hot0, labels1hot1,
grad_vals0, grad_vals1, indx_train0, indx_train1)
Max = trainingData.Max
Min = trainingData.Min
testingData = DataHandler(inter_outs0, inter_outs1, lossval0, lossval1, labels1hot0, labels1hot1,
grad_vals0, grad_vals1, indx_test0, indx_test1, Max=Max, Min=Min)
AttackerShokri = TrainWBAttacker(trainingData, testingData, out_size_list, layer_size_list, kernel_size_list)
dataloaderEval = DataLoader(testingData, batch_size=100, shuffle=False)
scoresEval = []
EvalY = []
with torch.no_grad():
for i, batch in enumerate(dataloaderEval):
example = batch[0]
target = batch[1]
scoresEval.append(AttackerShokri(*example).detach())
EvalY.append(target.cpu().data.numpy())
scoresEval = torch.cat(scoresEval, axis=0)
scoresEval = torch.squeeze(scoresEval)
scoresEval = scoresEval.cpu().data.numpy()
EvalY = np.squeeze(np.concatenate(EvalY, axis=0))
TPR_, metrics_ = computeMetricsAlt(scoresEval, EvalY, FPR)
aux_list_metrics.append(metrics_)
aux_list_TPR.append(TPR_)
metrics = np.stack(aux_list_metrics, 1)
mean_metrics = np.mean(metrics, 1)
std_metrics = np.std(metrics, 1)
new_row = {"Attack Strategy": 'Nasr White-Box',
'AUROC': mean_metrics[0], 'AUROC STD': std_metrics[0],
'Best Accuracy': mean_metrics[1], 'Best Accuracy STD': std_metrics[1],
'FPR at TPR80': mean_metrics[2], 'FPR at TPR80 STD': std_metrics[2],
'FPR at TPR85': mean_metrics[3], 'FPR at TPR85 STD': std_metrics[3],
'FPR at TPR90': mean_metrics[4], 'FPR at TPR90 STD': std_metrics[4],
'FPR at TPR95': mean_metrics[5], 'FPR at TPR95 STD': std_metrics[5]}
dfMetricsBalanced = dfMetricsBalanced.append(new_row, ignore_index=True)
TPR = np.stack(aux_list_TPR, 1)
mean_TPR = np.mean(TPR, 1)
std_TPR = np.std(TPR, 1)
dfTPRaux = pd.DataFrame(np.stack((mean_TPR, std_TPR), axis=1), columns=['Nasr White-Box TPR',
'Nasr White-Box TPR STD'])
dfTPRBalanced = dfTPRBalanced.join(dfTPRaux)
# Rezaei Analysis
try:
dfMetricsRezaei = pd.read_csv(currdir + '/CompleteResults/RezaeiMetrics_' + model_type + '.csv')
except FileNotFoundError:
dfMetricsRezaei = pd.DataFrame(columns=['Attack Strategy',
'Best Accuracy', 'Best Accuracy STD',
'FPR', 'FPR STD'])
aux_list_metrics = []
for k in range(num_runs_for_random):
np.random.seed(k)
indx_train0 = np.random.choice(lossval0.shape[0], size=8000, replace=False)
indx_train1 = np.random.choice(lossval1.shape[0], size=40000, replace=False)
    indx_test0 = np.setdiff1d(np.arange(lossval0.shape[0]), indx_train0)
'''Train CIFAR10 with PyTorch.'''
# This script trains on the CIFAR-10 dataset as the closed set and tests with the SVHN test dataset as the open set.
# If you search for the SVHN dataset, you will find what kind of dataset it is.
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
import os
import argparse
from models import *
from utils import progress_bar
import numpy as np
import copy
from sklearn import metrics
from cb_loss import CB_loss
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--total_epoch', default=100, type=int, help='total epoch')
parser.add_argument('--interval', default=10, type=int, help="save interval")
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
parser.add_argument('--num_cls', default=2, type=int, help="num classes")
parser.add_argument('--temperature', default=1.0, type=float, help="temperature scaling")
parser.add_argument('--flood_level', default=0.0, type=float, help="flood level")
parser.add_argument('--ensemble', default=1, type=int, help="ensemble test mode")
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.Resize((224,224)),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(90),
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_crop = transforms.Compose([
transforms.RandomCrop((224,224)),
transforms.ToTensor(),
])
# -----------------------------------------------------------------------------------
batch_size=128
num_classes =args.num_cls
T = args.temperature
flood = args.flood_level
total_epoch= args.total_epoch
interval = args.interval
ensemble=args.ensemble
trainset = torchvision.datasets.ImageFolder(
root='./data/custom2/train', transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset ,batch_size=256, shuffle=True,num_workers=0)
testset = torchvision.datasets.ImageFolder(
root='./data/custom2/test', transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=256, shuffle=False, num_workers=0)
cropset = torchvision.datasets.ImageFolder(
root='./data/custom2/test', transform=transform_crop)
croploader = torch.utils.data.DataLoader(
cropset, batch_size=256, shuffle=False, num_workers=0)
# --------------------------------------------------------------------------------
# This is where the model to train is defined. We will use ResNet18.
print('==> Building model..')
# net = VGG('VGG19')
#net = ResNet18()
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
# net = EfficientNetB0()
# net = RegNetX_200MF()
# net = SimpleDLA()
net = models.resnet18(pretrained=True)
net.fc =nn.Linear(512,num_classes)
net = net.to(device)
# if device == 'cuda':
# net = torch.nn.DataParallel(net)
# cudnn.benchmark = True
# This section loads a saved model checkpoint.
# ----------------------------------------------------------------------------------
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.pth')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
# ----------------------------------------------------------------------------------
# This section defines the loss function, optimizer, and learning rate scheduler.
# -------------------------------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
# --------------------------------------------------------------------------------------
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def get_sample_per_cls(trainloader,num_classes):
train_sample_fname = trainloader.dataset.samples
train_sample_fname = np.asarray([ | np.asarray(i) | numpy.asarray |
##############################################
# -------- Import Libraries --------#
#############################################
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
import scipy.signal as signal
print("#" * 100)
with np.errstate(divide='ignore'):
np.float64(1.0) / 0.0
##############################################
# -------- Datetime Transformer --------#
#############################################
def datetime_transformer(df, datetime_vars):
"""
The datetime transformer
Parameters
----------
df : the dataframe
datetime_vars : the datetime variables
Returns
----------
The dataframe where datetime_vars are transformed into the following 6 datetime types:
year, month, day, hour, minute and second
"""
# The dictionary with key as datetime type and value as datetime type operator
dict_ = {'year': lambda x: x.dt.year,
'month': lambda x: x.dt.month,
'day': lambda x: x.dt.day
# ,
# 'hour': lambda x: x.dt.hour,
# 'minute': lambda x: x.dt.minute,
# 'second': lambda x: x.dt.second
}
# Make a copy of df
df_datetime = df.copy(deep=True)
# For each variable in datetime_vars
for var in datetime_vars:
# Cast the variable to datetime
df_datetime[var] = pd.to_datetime(df_datetime[var])
# For each item (datetime_type and datetime_type_operator) in dict_
for datetime_type, datetime_type_operator in dict_.items():
# Add a new variable to df_datetime where:
# the variable's name is var + '_' + datetime_type
# the variable's values are the ones obtained by datetime_type_operator
df_datetime[var + '_' + datetime_type] = datetime_type_operator(df_datetime[var])
# Remove datetime_vars from df_datetime
# df_datetime = df_datetime.drop(columns=datetime_vars)
return df_datetime
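# Illustrative usage sketch, added for clarity (not part of the original
# script; the column name 'date' and the toy values below are assumptions).
def _example_datetime_transformer():
    df = pd.DataFrame({'date': ['2021-01-15', '2021-02-20'], 'value': [1.0, 2.0]})
    out = datetime_transformer(df, ['date'])
    # 'out' now carries the extra columns date_year, date_month and date_day
    return out[['date_year', 'date_month', 'date_day']]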
##############################################
# -------- Auto Correlation Function --------#
#############################################
def auto_corr_func_lags(y_tt, lags):
ry = []
l = []
den = 0
y_bar = np.mean(y_tt)
for i in range(len(y_tt)):
den += (y_tt[i] - y_bar) ** 2
for k in range(0, lags + 1):
num = 0
for j in range(k, len(y_tt)):
num += (y_tt[j] - y_bar) * (y_tt[j - k] - y_bar)
acf = num / den
ry.append(acf)
l.append(k)
ryy = ry[::-1]
ry_f = ryy[:-1] + ry
ll = l[::-1]
ll = [li * -1 for li in ll]
l_f = ll[:-1] + l
return ry_f, l_f
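# Illustrative sketch, not in the original script (the toy series is an
# assumption): the ACF is returned mirrored around lag 0, so requesting `lags`
# gives 2*lags + 1 values with the middle entry equal to 1.0.
def _example_acf():
    series = [1.0, 2.0, 3.0, 4.0, 5.0, 4.0, 3.0, 2.0]
    ry_f, l_f = auto_corr_func_lags(series, 3)
    assert len(ry_f) == 2 * 3 + 1 and len(l_f) == 2 * 3 + 1
    return ry_f, l_f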
##############################################
# -------- Rolling Mean and Variance --------#
#############################################
def cal_rolling_mean_var(df, col, mean_or_var):
lst = []
lst1 = []
for i in range(0, len(df)):
mean = 0
var = 0
if i == 0:
mean += df[col][i]
var = 0
else:
for j in range(0, i + 1):
mean += df[col][j]
mean = mean / (i + 1)
for k in range(0, i + 1):
var += (df[col][k] - mean) ** 2
var = var / i
lst.append(mean)
lst1.append(var)
if mean_or_var == 'mean':
return lst
else:
return lst1
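# Illustrative sketch, not in the original script (the column name 'y' is an
# assumption): the rolling statistics are cumulative, i.e. entry i uses
# observations 0..i of the column.
def _example_rolling_stats():
    df = pd.DataFrame({'y': [1.0, 2.0, 3.0, 4.0]})
    rolling_means = cal_rolling_mean_var(df, 'y', 'mean')
    rolling_vars = cal_rolling_mean_var(df, 'y', 'var')
    return rolling_means, rolling_vars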
##############################################
# -------- Q=Value --------#
#############################################
def q_value(y_tt, lags):
ry = []
den = 0
y_bar = np.mean(y_tt)
for i in range(len(y_tt)):
den += (y_tt[i] - y_bar) ** 2
for k in range(0, lags + 1):
num = 0
for j in range(k, len(y_tt)):
num += (y_tt[j] - y_bar) * (y_tt[j - k] - y_bar)
acf = num / den
ry.append(acf)
# print(ry)
ry = [number ** 2 for number in ry[1:]]
q_value = np.sum(ry) * len(y_tt)
return q_value
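# Illustrative sketch, not in the original script (the toy inputs are
# assumptions): the statistic has the Box-Pierce form Q = n * sum(r_k^2), so
# white noise should give a much smaller value than a strongly autocorrelated
# series.
def _example_q_value():
    np.random.seed(0)
    white_noise = np.random.randn(200)
    trending = np.arange(200, dtype=float)
    return q_value(white_noise, 5), q_value(trending, 5)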
##############################################
# -------- Average Forecast Method --------#
#############################################
def avg_forecast_method(tr, tt):
pred = []
train_err = []
test_err = []
pred_test = []
for i in range(1, len(tr)):
p = 0
for j in range(0, i):
p += (tr[j])
p = p / i
e = tr[i] - p
pred.append(p)
train_err.append(e)
p = | np.sum(tr) | numpy.sum |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""main"""
import argparse
import os
import json
import cv2
import numpy as np
from api.infer import SdkApi
from config.config import config
from tqdm import tqdm
from shapely.geometry import Polygon
def parser_args():
"""parser_args"""
parser = argparse.ArgumentParser(description="siamRPN inference")
parser.add_argument("--img_path",
type=str,
required=False,
default="../data/input/vot2015",
help="image directory.")
parser.add_argument(
"--pipeline_path",
type=str,
required=False,
default="../data/config/siamRPN.pipeline",
help="image file path. The default is '../data/config/siamRPN.pipeline'. ")
parser.add_argument(
"--infer_result_dir",
type=str,
required=False,
default="./result",
help="cache dir of inference result. The default is './result'."
)
arg = parser.parse_args()
return arg
def get_instance_image(img, bbox, size_z, size_x, context_amount, img_mean=None):
cx, cy, w, h = bbox # float type
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z) # the width of the crop box
s_x = s_z * size_x / size_z
instance_img, scale_x = crop_and_pad(img, cx, cy, size_x, s_x, img_mean)
w_x = w * scale_x
h_x = h * scale_x
return instance_img, w_x, h_x, scale_x
def get_exemplar_image(img, bbox, size_z, context_amount, img_mean=None):
cx, cy, w, h = bbox
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
exemplar_img, _ = crop_and_pad(img, cx, cy, size_z, s_z, img_mean)
return exemplar_img, scale_z, s_z
def round_up(value):
return round(value + 1e-6 + 1000) - 1000
def crop_and_pad(img, cx, cy, model_sz, original_sz, img_mean=None):
"""change img size
:param img:rgb
:param cx: center x
:param cy: center y
:param model_sz: changed size
:param original_sz: origin size
:param img_mean: mean of img
:return: changed img ,scale for origin to changed
"""
im_h, im_w, _ = img.shape
xmin = cx - (original_sz - 1) / 2
xmax = xmin + original_sz - 1
ymin = cy - (original_sz - 1) / 2
ymax = ymin + original_sz - 1
left = int(round_up(max(0., -xmin)))
top = int(round_up(max(0., -ymin)))
right = int(round_up(max(0., xmax - im_w + 1)))
bottom = int(round_up(max(0., ymax - im_h + 1)))
xmin = int(round_up(xmin + left))
xmax = int(round_up(xmax + left))
ymin = int(round_up(ymin + top))
ymax = int(round_up(ymax + top))
r, c, k = img.shape
if any([top, bottom, left, right]):
# 0 is better than 1 initialization
te_im = np.zeros((r + top + bottom, c + left + right, k), np.uint8)
te_im[top:top + r, left:left + c, :] = img
if top:
te_im[0:top, left:left + c, :] = img_mean
if bottom:
te_im[r + top:, left:left + c, :] = img_mean
if left:
te_im[:, 0:left, :] = img_mean
if right:
te_im[:, c + left:, :] = img_mean
im_patch_original = te_im[int(ymin):int(
ymax + 1), int(xmin):int(xmax + 1), :]
else:
im_patch_original = img[int(ymin):int(
ymax + 1), int(xmin):int(xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
# zzp: use cv to get a better speed
im_patch = cv2.resize(im_patch_original, (model_sz, model_sz))
else:
im_patch = im_patch_original
scale = model_sz / im_patch_original.shape[0]
return im_patch, scale
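# Illustrative sketch, not in the original file (image size, box and exemplar
# size below are assumptions): crop a 127x127 exemplar patch around the box
# centre; the per-channel image mean fills any region that falls off the frame.
def _example_crop_exemplar():
    img = np.zeros((240, 320, 3), dtype=np.uint8)
    bbox = (30.0, 40.0, 60.0, 80.0) # cx, cy, w, h near the top-left corner
    img_mean = tuple(map(int, img.mean(axis=(0, 1))))
    exemplar, scale_z, s_z = get_exemplar_image(img, bbox, 127, 0.5, img_mean)
    return exemplar.shape, scale_z, s_z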
def generate_anchors(total_stride, base_size, scales, ratios, score_size):
""" anchor generator function"""
anchor_num = len(ratios) * len(scales)
anchor = np.zeros((anchor_num, 4), dtype=np.float32)
size = base_size * base_size
count = 0
for ratio in ratios:
ws = int(np.sqrt(size / ratio))
hs = int(ws * ratio)
for scale in scales:
wws = ws * scale
hhs = hs * scale
anchor[count, 0] = 0
anchor[count, 1] = 0
anchor[count, 2] = wws
anchor[count, 3] = hhs
count += 1
anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
ori = - (score_size // 2) * total_stride
xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],
[ori + total_stride * dy for dy in range(score_size)])
xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \
np.tile(yy.flatten(), (anchor_num, 1)).flatten()
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return anchor
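# Illustrative sketch, not in the original file (the parameter values are
# assumptions in the style of common SiamRPN settings): 5 ratios x 1 scale on
# a 17x17 score map yields 5*17*17 anchors, each stored as (cx, cy, w, h).
def _example_generate_anchors():
    anchors = generate_anchors(total_stride=8, base_size=8,
                               scales=np.array([8]),
                               ratios=np.array([0.33, 0.5, 1.0, 2.0, 3.0]),
                               score_size=17)
    assert anchors.shape == (5 * 17 * 17, 4)
    return anchors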
def box_transform_inv(anchors, offset):
"""invert transform box
:param anchors: object
:param offset: object
:return: object
"""
anchor_xctr = anchors[:, :1]
anchor_yctr = anchors[:, 1:2]
anchor_w = anchors[:, 2:3]
anchor_h = anchors[:, 3:]
    offset_x, offset_y, offset_w, offset_h = \
        offset[:, :1], offset[:, 1:2], offset[:, 2:3], offset[:, 3:]
box_cx = anchor_w * offset_x + anchor_xctr
box_cy = anchor_h * offset_y + anchor_yctr
box_w = anchor_w * np.exp(offset_w)
box_h = anchor_h * np.exp(offset_h)
box = np.hstack([box_cx, box_cy, box_w, box_h])
return box
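# Illustrative sketch, not in the original file: a zero offset leaves the
# anchor unchanged, since the centre shift is 0 and exp(0) == 1.
def _example_box_transform_inv():
    anchors = np.array([[0.0, 0.0, 64.0, 64.0]])
    offsets = np.array([[0.0, 0.0, 0.0, 0.0]])
    return box_transform_inv(anchors, offsets) # -> [[0., 0., 64., 64.]]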
def get_axis_aligned_bbox(region):
""" convert region to (cx, cy, w, h) that represent by axis aligned box
"""
nv = len(region)
region = np.array(region)
if nv == 8:
x1 = min(region[0::2])
x2 = max(region[0::2])
y1 = min(region[1::2])
y2 = max(region[1::2])
A1 = np.linalg.norm(region[0:2] - region[2:4]) * \
np.linalg.norm(region[2:4] - region[4:6])
A2 = (x2 - x1) * (y2 - y1)
s = np.sqrt(A1 / A2)
w = s * (x2 - x1) + 1
h = s * (y2 - y1) + 1
x = x1
y = y1
else:
x = region[0]
y = region[1]
w = region[2]
h = region[3]
return x, y, w, h
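# Illustrative sketch, not in the original file (the coordinates below are
# assumptions): an 8-value VOT-style polygon is reduced to an axis-aligned
# (x, y, w, h), while a 4-value region is passed through unchanged.
def _example_axis_aligned_bbox():
    from_polygon = get_axis_aligned_bbox([10, 20, 110, 20, 110, 70, 10, 70])
    passthrough = get_axis_aligned_bbox([10, 20, 100, 50])
    return from_polygon, passthrough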
def softmax(y):
"""softmax of numpy"""
x = y.copy()
if len(x.shape) > 1:
tmp = np.max(x, axis=1)
x -= tmp.reshape((x.shape[0], 1))
x = np.exp(x)
tmp = np.sum(x, axis=1)
x /= tmp.reshape((x.shape[0], 1))
else:
tmp = np.max(x)
x -= tmp
x = np.exp(x)
tmp = np.sum(x)
x /= tmp
return x
def judge_failures(pred_bbox, gt_bbox, threshold=0):
"""" judge whether to fail or not """
if len(gt_bbox) == 4:
if iou(np.array(pred_bbox).reshape(-1, 4), np.array(gt_bbox).reshape(-1, 4)) > threshold:
return False
else:
poly_pred = Polygon(np.array([[pred_bbox[0], pred_bbox[1]],
[pred_bbox[2], pred_bbox[1]],
[pred_bbox[2], pred_bbox[3]],
[pred_bbox[0], pred_bbox[3]]
])).convex_hull
poly_gt = Polygon(np.array(gt_bbox).reshape(4, 2)).convex_hull
inter_area = poly_gt.intersection(poly_pred).area
overlap = inter_area / (poly_gt.area + poly_pred.area - inter_area)
if overlap > threshold:
return False
return True
def calculate_accuracy_failures(pred_trajectory, gt_trajectory,
bound=None):
'''
args:
pred_trajectory:list of bbox
gt_trajectory: list of bbox ,shape == pred_trajectory
bound :w and h of img
return :
overlaps:list ,iou value in pred_trajectory
acc : mean iou value
failures: failures point in pred_trajectory
        num_failures: number of failures
'''
overlaps = []
failures = []
for i, pred_traj in enumerate(pred_trajectory):
if len(pred_traj) == 1:
if pred_trajectory[i][0] == 2:
failures.append(i)
overlaps.append(float("nan"))
else:
if bound is not None:
poly_img = Polygon(np.array([[0, 0],
[0, bound[1]],
[bound[0], bound[1]],
[bound[0], 0]])).convex_hull
if len(gt_trajectory[i]) == 8:
poly_pred = Polygon(np.array([[pred_trajectory[i][0], pred_trajectory[i][1]],
[pred_trajectory[i][2], pred_trajectory[i][1]],
[pred_trajectory[i][2], pred_trajectory[i][3]],
[pred_trajectory[i][0], pred_trajectory[i][3]]
])).convex_hull
poly_gt = Polygon(
np.array(gt_trajectory[i]).reshape(4, 2)).convex_hull
if bound is not None:
gt_inter_img = poly_gt.intersection(poly_img)
pred_inter_img = poly_pred.intersection(poly_img)
inter_area = gt_inter_img.intersection(pred_inter_img).area
overlap = inter_area / \
(gt_inter_img.area + pred_inter_img.area - inter_area)
else:
inter_area = poly_gt.intersection(poly_pred).area
overlap = inter_area / \
(poly_gt.area + poly_pred.area - inter_area)
elif len(gt_trajectory[i]) == 4:
overlap = iou(np.array(pred_trajectory[i]).reshape(-1, 4), np.array(gt_trajectory[i]).reshape(-1, 4))
overlaps.append(overlap)
acc = 0
num_failures = len(failures)
if overlaps:
acc = np.nanmean(overlaps)
return acc, overlaps, failures, num_failures
def calculate_expected_overlap(fragments, fweights):
""" compute expected iou """
max_len = fragments.shape[1]
expected_overlaps = np.zeros((max_len), np.float32)
expected_overlaps[0] = 1
# TODO Speed Up
for i in range(1, max_len):
mask = np.logical_not(np.isnan(fragments[:, i]))
if np.any(mask):
fragment = fragments[mask, 1:i+1]
seq_mean = np.sum(fragment, 1) / fragment.shape[1]
expected_overlaps[i] = np.sum(seq_mean *
fweights[mask]) / np.sum(fweights[mask])
return expected_overlaps
def iou(box1, box2):
""" compute iou """
box1, box2 = box1.copy(), box2.copy()
N = box1.shape[0]
K = box2.shape[0]
box1 = np.array(box1.reshape((N, 1, 4))) + \
np.zeros((1, K, 4)) # box1=[N,K,4]
box2 = np.array(box2.reshape((1, K, 4))) + \
np.zeros((N, 1, 4)) # box1=[N,K,4]
x_max = np.max(np.stack((box1[:, :, 0], box2[:, :, 0]), axis=-1), axis=2)
x_min = np.min(np.stack((box1[:, :, 2], box2[:, :, 2]), axis=-1), axis=2)
y_max = np.max(np.stack((box1[:, :, 1], box2[:, :, 1]), axis=-1), axis=2)
y_min = np.min(np.stack((box1[:, :, 3], box2[:, :, 3]), axis=-1), axis=2)
tb = x_min-x_max
lr = y_min-y_max
tb[np.where(tb < 0)] = 0
lr[np.where(lr < 0)] = 0
over_square = tb*lr
all_square = (box1[:, :, 2] - box1[:, :, 0]) * (box1[:, :, 3] - box1[:, :, 1]) + (box2[:, :, 2] - \
box2[:, :, 0]) * (box2[:, :, 3] - box2[:, :, 1]) - over_square
return over_square / all_square
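# Illustrative sketch, not in the original file (the boxes are assumptions):
# boxes are (x1, y1, x2, y2); identical boxes give IoU 1 and disjoint boxes
# give IoU 0.
def _example_iou():
    a = np.array([[0.0, 0.0, 10.0, 10.0]])
    b = np.array([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]])
    return iou(a, b) # -> [[1., 0.]]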
def calculate_eao(dataset_name, all_failures, all_overlaps, gt_traj_length, skipping=5):
'''
input:dataset name
all_failures: type is list , index of failure
all_overlaps: type is list , length of list is the length of all_failures
gt_traj_length: type is list , length of list is the length of all_failures
    skipping: number of frames skipped after each failure
'''
if dataset_name == "VOT2016":
low = 108
high = 371
elif dataset_name == "VOT2015":
low = 108
high = 371
fragment_num = sum([len(x)+1 for x in all_failures])
max_len = max([len(x) for x in all_overlaps])
tags = [1] * max_len
    seq_weight = 1 / (1 + 1e-10)  # guard against division by zero
eao = {}
# prepare segments
fweights = np.ones(fragment_num, dtype=np.float32) * np.nan
fragments = np.ones((fragment_num, max_len), dtype=np.float32) * np.nan
seg_counter = 0
for traj_len, failures, overlaps in zip(gt_traj_length, all_failures, all_overlaps):
if failures:
points = [x+skipping for x in failures if
x+skipping <= len(overlaps)]
points.insert(0, 0)
for i, _ in enumerate(points):
if i != len(points) - 1:
fragment = np.array(
overlaps[points[i]:points[i+1]+1], dtype=np.float32)
fragments[seg_counter, :] = 0
else:
fragment = np.array(overlaps[points[i]:], dtype=np.float32)
fragment[np.isnan(fragment)] = 0
fragments[seg_counter, :len(fragment)] = fragment
if i != len(points) - 1:
tag_value = tags[points[i]:points[i+1]+1]
w = sum(tag_value) / (points[i+1] - points[i]+1)
fweights[seg_counter] = seq_weight * w
else:
tag_value = tags[points[i]:len(overlaps)]
w = sum(tag_value) / (traj_len - points[i]+1e-16)
fweights[seg_counter] = seq_weight * w
seg_counter += 1
else:
# no failure
max_idx = min(len(overlaps), max_len)
fragments[seg_counter, :max_idx] = overlaps[:max_idx]
tag_value = tags[0: max_idx]
w = sum(tag_value) / max_idx
fweights[seg_counter] = seq_weight * w
seg_counter += 1
expected_overlaps = calculate_expected_overlap(fragments, fweights)
print(len(expected_overlaps))
# calculate eao
weight = np.zeros((len(expected_overlaps)))
weight[low-1:high-1+1] = 1
expected_overlaps = np.array(expected_overlaps, dtype=np.float32)
is_valid = np.logical_not( | np.isnan(expected_overlaps) | numpy.isnan |
import subprocess
import numpy as np
import pytest
class Datasets:
def __init__(self, train_data, train_labels, test_data, test_labels):
self.train_data = train_data
self.train_labels = train_labels
self.test_data = test_data
self.test_labels = test_labels
def test_dataset_shapes(featurize_stage):
assert np.shape(featurize_stage.train_data) == (60000, 784)
assert np.shape(featurize_stage.train_labels) == (60000,)
assert np.shape(featurize_stage.test_data) == (10000, 784)
assert np.shape(featurize_stage.test_labels) == (10000,)
def test_dataset_standardized(featurize_stage):
train_mean = np.mean(featurize_stage.train_data)
train_std = | np.std(featurize_stage.train_data) | numpy.std |
""" Solve the radial Teukolsky equation via Leaver's method.
"""
from __future__ import division, print_function, absolute_import
from numba import njit
import numpy as np
from .contfrac import lentz
# TODO some documentation here, better documentation throughout
@njit(cache=True)
def sing_pt_char_exps(omega, a, s, m):
r""" Compute the three characteristic exponents of the singular points
of the radial Teukolsky equation.
We want ingoing at the outer horizon and outgoing at infinity. The
choice of one of two possible characteristic exponents at the
inner horizon doesn't affect the minimal solution in Leaver's
method, so we just pick one. Thus our choices are, in the
nomenclature of [1]_, :math:`(\zeta_+, \xi_-, \eta_+)`.
Parameters
----------
omega: complex
The complex frequency in the ansatz for the solution of the
radial Teukolsky equation.
a: double
Spin parameter of the black hole, 0. <= a < 1 .
s: int
Spin weight of the field (i.e. -2 for gravitational).
m: int
Azimuthal number for the perturbation.
Returns
-------
(complex, complex, complex)
:math:`(\zeta_+, \xi_-, \eta_+)`
References
----------
.. [1] <NAME>, <NAME>, "Gravitational perturbations of the
Kerr geometry: High-accuracy study," Phys. Rev. D 90, 124021
(2014), https://arxiv.org/abs/1410.7698 .
"""
root = np.sqrt(1. - a*a)
r_p, r_m = 1. + root, 1. - root
sigma_p = (2.*omega*r_p - m*a)/(2.*root)
sigma_m = (2.*omega*r_m - m*a)/(2.*root)
zeta = +1.j * omega # This is the choice \zeta_+
xi = - s - 1.j * sigma_p # This is the choice \xi_-
eta = -1.j * sigma_m # This is the choice \eta_+
return zeta, xi, eta
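# Illustrative sketch, not part of the original module (the frequency value is
# an arbitrary assumption): in the Schwarzschild limit a = 0 we have r_- = 0
# and m*a = 0, so sigma_- vanishes and the inner-horizon exponent eta is 0,
# while zeta = i*omega and xi = -s - i*sigma_+ with sigma_+ = 2*omega.
def _example_char_exps():
    zeta, xi, eta = sing_pt_char_exps(0.5 - 0.1j, 0., -2, 2)
    return zeta, xi, eta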
@njit(cache=True)
def D_coeffs(omega, a, s, m, A):
""" The D_0 through D_4 coefficients that enter into the radial
infinite continued fraction, Eqs. (31) of [1]_ .
Parameters
----------
omega: complex
The complex frequency in the ansatz for the solution of the
radial Teukolsky equation.
a: double
Spin parameter of the black hole, 0. <= a < 1 .
s: int
Spin weight of the field (i.e. -2 for gravitational).
m: int
Azimuthal number for the perturbation.
A: complex
Separation constant between angular and radial ODEs.
Returns
-------
array[5] of complex
D_0 through D_4 .
References
----------
.. [1] <NAME>, <NAME>, "Gravitational perturbations of the
Kerr geometry: High-accuracy study," Phys. Rev. D 90, 124021
(2014), https://arxiv.org/abs/1410.7698 .
"""
zeta, xi, eta = sing_pt_char_exps(omega, a, s, m)
root = np.sqrt(1. - a*a)
p = root * zeta
alpha = 1. + s + xi + eta - 2.*zeta + s # Because we took the root \zeta_+
gamma = 1. + s + 2.*eta
delta = 1. + s + 2.*xi
sigma = (A + a*a*omega*omega - 8.*omega*omega
+ p * (2.*alpha + gamma - delta)
+ (1. + s - 0.5*(gamma + delta))
* (s + 0.5*(gamma + delta)))
D = [0.j] * 5
D[0] = delta
D[1] = 4.*p - 2.*alpha + gamma - delta - 2.
D[2] = 2.*alpha - gamma + 2.
D[3] = alpha*(4.*p - delta) - sigma
D[4] = alpha*(alpha - gamma + 1.)
return D
def leaver_cf_trunc_inversion(omega, a, s, m, A,
n_inv, N=300, r_N=1.):
""" Legacy function.
Approximate the n_inv inversion of the infinite continued
fraction for solving the radial Teukolsky equation, using
N terms total for the approximation. This uses "bottom up"
evaluation, and you can pass a seed value r_N to assume for
the rest of the infinite fraction which has been truncated.
The value returned is Eq. (44) of [1]_.
Parameters
----------
omega: complex
The complex frequency for evaluating the infinite continued
fraction.
a: float
Spin parameter of the black hole, 0. <= a < 1 .
s: int
Spin weight of the field (i.e. -2 for gravitational).
m: int
Azimuthal number for the perturbation.
A: complex
Separation constant between angular and radial ODEs.
n_inv: int
Inversion number for the infinite continued fraction. Finding
the nth overtone is typically most stable when n_inv = n .
N: int, optional [default: 300]
The depth where the infinite continued fraction is truncated.
r_N: float, optional [default: 1.]
Value to assume for the rest of the infinite continued fraction
past the point of truncation.
Returns
-------
complex
The nth inversion of the infinite continued fraction evaluated
with these arguments.
References
----------
.. [1] <NAME>, <NAME>, "Gravitational perturbations of the
Kerr geometry: High-accuracy study," Phys. Rev. D 90, 124021
(2014), https://arxiv.org/abs/1410.7698 .
"""
n = np.arange(0, N+1)
D = D_coeffs(omega, a, s, m, A)
alpha = n*n + (D[0] + 1.)*n + D[0]
beta = -2.*n*n + (D[1] + 2.)*n + D[3]
gamma = n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.
conv1 = 0.
for i in range(0, n_inv): # n_inv is not included
conv1 = alpha[i] / (beta[i] - gamma[i] * conv1)
conv2 = -r_N # Is this sign correct?
for i in range(N, n_inv, -1): # n_inv is not included
conv2 = gamma[i] / (beta[i] - alpha[i] * conv2)
return (beta[n_inv]
- gamma[n_inv] * conv1
- alpha[n_inv] * conv2)
# TODO possible choices for r_N: 0., 1., approximation using (34)-(38)
# Definitions for a_i, b_i for continued fraction below
# In defining the below a, b sequences, I have cleared a fraction
# compared to the usual way of writing the radial infinite
# continued fraction. The point of doing this was that so both
# terms, a(n) and b(n), tend to 1 as n goes to infinity. Further,
# We can analytically divide through by n in the numerator and
# denominator to make the numbers closer to 1.
@njit(cache=True)
def rad_a(i, n_inv, D):
n = i + n_inv - 1
return -(n*n + (D[0] + 1.)*n + D[0])/(n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.)
@njit(cache=True)
def rad_b(i, n_inv, D):
if (i==0): return 0
n = i + n_inv
return (-2.*n*n + (D[1] + 2.)*n + D[3])/(n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.)
#Note that we do not jit the following function, since lentz is not jitted.
def leaver_cf_inv_lentz_old(omega, a, s, m, A, n_inv,
tol=1.e-10, N_min=0, N_max=np.Inf):
""" Legacy function. Same as :meth:`leaver_cf_inv_lentz` except
calling :meth:`qnm.contfrac.lentz`. We do not jit this function
since lentz is not jitted. It remains here for testing purposes.
See documentation for :meth:`leaver_cf_inv_lentz` for parameters
and return value.
Examples
--------
>>> from qnm.radial import leaver_cf_inv_lentz_old, leaver_cf_inv_lentz
>>> print(leaver_cf_inv_lentz_old(omega=.4 - 0.2j, a=0.02, s=-2, m=2, A=4.+0.j, n_inv=0))
((-3.5662773770495972-1.538871079338485j), 9.702532283649582e-11, 76)
Compare the two versions of the function:
>>> old = leaver_cf_inv_lentz_old(omega=.4 - 0.2j, a=0.02, s=-2, m=2, A=4.+0.j, n_inv=0)
>>> new = leaver_cf_inv_lentz(omega=.4 - 0.2j, a=0.02, s=-2, m=2, A=4.+0.j, n_inv=0)
>>> [ old[i]-new[i] for i in range(3)]
[0j, 0.0, 0]
"""
D = D_coeffs(omega, a, s, m, A)
    # This is only used for the terminating fraction
n = np.arange(0, n_inv+1)
alpha = n*n + (D[0] + 1.)*n + D[0]
beta = -2.*n*n + (D[1] + 2.)*n + D[3]
gamma = n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.
conv1 = 0.
for i in range(0, n_inv): # n_inv is not included
conv1 = alpha[i] / (beta[i] - gamma[i] * conv1)
conv2, cf_err, n_frac = lentz(rad_a, rad_b,
args=(n_inv, D),
tol=tol, N_min=N_min, N_max=N_max)
return (beta[n_inv]
- gamma[n_inv] * conv1
+ gamma[n_inv] * conv2), cf_err, n_frac
@njit(cache=True)
def leaver_cf_inv_lentz(omega, a, s, m, A, n_inv,
tol=1.e-10, N_min=0, N_max=np.Inf):
"""Compute the n_inv inversion of the infinite continued
fraction for solving the radial Teukolsky equation, using
modified Lentz's method.
The value returned is Eq. (44) of [1]_.
Same as :meth:`leaver_cf_inv_lentz_old`, but with Lentz's method
inlined so that numba can speed things up.
Parameters
----------
omega: complex
The complex frequency for evaluating the infinite continued
fraction.
a: float
Spin parameter of the black hole, 0. <= a < 1 .
s: int
Spin weight of the field (i.e. -2 for gravitational).
m: int
Azimuthal number for the perturbation.
A: complex
Separation constant between angular and radial ODEs.
n_inv: int
Inversion number for the infinite continued fraction. Finding
the nth overtone is typically most stable when n_inv = n .
tol: float, optional [default: 1.e-10]
Tolerance for termination of Lentz's method.
N_min: int, optional [default: 0]
Minimum number of iterations through Lentz's method.
N_max: int or comparable, optional [default: np.Inf]
Maximum number of iterations for Lentz's method.
Returns
-------
(complex, float, int)
The first value (complex) is the nth inversion of the infinite
continued fraction evaluated with these arguments. The second
value (float) is the estimated error from Lentz's method. The
third value (int) is the number of iterations of Lentz's method.
Examples
--------
>>> from qnm.radial import leaver_cf_inv_lentz
>>> print(leaver_cf_inv_lentz(omega=.4 - 0.2j, a=0.02, s=-2, m=2, A=4.+0.j, n_inv=0))
((-3.5662773770495972-1.538871079338485j), 9.702532283649582e-11, 76)
References
----------
.. [1] <NAME>, <NAME>, "Gravitational perturbations of the
Kerr geometry: High-accuracy study," Phys. Rev. D 90, 124021
(2014), https://arxiv.org/abs/1410.7698 .
"""
D = D_coeffs(omega, a, s, m, A)
    # This is only used for the terminating fraction
n = np.arange(0, n_inv+1)
alpha = n*n + (D[0] + 1.)*n + D[0]
beta = -2.*n*n + (D[1] + 2.)*n + D[3]
gamma = n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.
conv1 = 0.
for i in range(0, n_inv): # n_inv is not included
conv1 = alpha[i] / (beta[i] - gamma[i] * conv1)
##############################
# Beginning of Lentz's method, inlined
# TODO should tiny be a parameter?
tiny = 1.e-30
# This is starting with b_0 = 0 for the infinite continued
# fraction. I could have started with other values (e.g. b_i
# evaluated with i=0) but then I would have had to subtract that
# same quantity away from the final result. I don't know if this
# affects convergence.
f_old = tiny
C_old = f_old
D_old = 0.
conv = False
j = 1
n = n_inv
while ((not conv) and (j < N_max)):
# In defining the below a, b sequences, I have cleared a fraction
# compared to the usual way of writing the radial infinite
# continued fraction. The point of doing this was that so both
# terms, a(n) and b(n), tend to 1 as n goes to infinity. Further,
# We can analytically divide through by n in the numerator and
# denominator to make the numbers closer to 1.
an = -(n*n + (D[0] + 1.)*n + D[0])/(n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.)
n = n + 1
bn = (-2.*n*n + (D[1] + 2.)*n + D[3])/(n*n + (D[2] - 3.)*n + D[4] - D[2] + 2.)
D_new = bn + an * D_old
if (D_new == 0):
D_new = tiny
C_new = bn + an / C_old
if (C_new == 0):
C_new = tiny
D_new = 1./D_new
Delta = C_new * D_new
f_new = f_old * Delta
if ((j > N_min) and ( | np.abs(Delta - 1.) | numpy.abs |
# coding: utf-8
from __future__ import division, print_function
# Standard library
import time
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
from astropy.extern.six.moves import cPickle as pickle
import pytest
# Project
from ..io import load
from ..core import CompositePotential
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from ....integrate import LeapfrogIntegrator
def partial_derivative(func, point, dim_ix=0, **kwargs):
xyz = np.array(point, copy=True)
def wraps(a):
xyz[dim_ix] = a
return func(xyz)
return derivative(wraps, point[dim_ix], **kwargs)
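# Illustrative sketch, not in the original test module (the toy function and
# point are assumptions): partial_derivative numerically differentiates a
# scalar function of a point along one axis, e.g. d/dy of x*y + z at
# (1, 2, 3) is approximately 1.
def _example_partial_derivative():
    func = lambda xyz: xyz[0] * xyz[1] + xyz[2]
    return partial_derivative(func, np.array([1., 2., 3.]), dim_ix=1, dx=1e-4)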
class PotentialTestBase(object):
name = None
potential = None # MUST SET THIS
tol = 1E-5
show_plots = False
@classmethod
def setup_class(cls):
if cls.name is None:
cls.name = cls.__name__[4:] # remove Test
print("Testing potential: {}".format(cls.name))
cls.w0 = np.array(cls.w0)
cls.ndim = cls.w0.size // 2
# TODO: need to test also quantity objects and phasespacepositions!
# these are arrays we will test the methods on:
w0_2d = np.repeat(cls.w0[:,None], axis=1, repeats=16)
w0_3d = np.repeat(w0_2d[...,None], axis=2, repeats=8)
w0_list = list(cls.w0)
w0_slice = w0_2d[:,:4]
cls.w0s = [cls.w0, w0_2d, w0_3d, w0_list, w0_slice]
cls._grad_return_shapes = [cls.w0[:cls.ndim].shape + (1,),
w0_2d[:cls.ndim].shape,
w0_3d[:cls.ndim].shape,
cls.w0[:cls.ndim].shape + (1,),
w0_slice[:cls.ndim].shape]
cls._hess_return_shapes = [(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_2d[:cls.ndim].shape,
(cls.ndim,) + w0_3d[:cls.ndim].shape,
(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_slice[:cls.ndim].shape]
cls._valu_return_shapes = [x[1:] for x in cls._grad_return_shapes]
def test_unitsystem(self):
assert isinstance(self.potential.units, UnitSystem)
def test_energy(self):
assert self.ndim == self.potential.ndim
for arr,shp in zip(self.w0s, self._valu_return_shapes):
v = self.potential.energy(arr[:self.ndim])
assert v.shape == shp
g = self.potential.energy(arr[:self.ndim], t=0.1)
g = self.potential.energy(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.energy(arr[:self.ndim], t=t)
g = self.potential.energy(arr[:self.ndim], t=t*self.potential.units['time'])
def test_gradient(self):
for arr,shp in zip(self.w0s, self._grad_return_shapes):
g = self.potential.gradient(arr[:self.ndim])
assert g.shape == shp
g = self.potential.gradient(arr[:self.ndim], t=0.1)
g = self.potential.gradient(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.gradient(arr[:self.ndim], t=t)
g = self.potential.gradient(arr[:self.ndim], t=t*self.potential.units['time'])
def test_hessian(self):
for arr,shp in zip(self.w0s, self._hess_return_shapes):
g = self.potential.hessian(arr[:self.ndim])
assert g.shape == shp
g = self.potential.hessian(arr[:self.ndim], t=0.1)
g = self.potential.hessian(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.hessian(arr[:self.ndim], t=t)
g = self.potential.hessian(arr[:self.ndim], t=t*self.potential.units['time'])
def test_mass_enclosed(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.mass_enclosed(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros( | np.array(arr) | numpy.array |
import h5py
import pickle
import numpy as np
def load_weights():
    fff = h5py.File('Mybase/mask_rcnn_coco.h5','r') # open the h5 file
#print(list(f.keys()))
mydict = {}
mydict['global_step:0'] = 1000
########res1########
dset = fff['conv1']
a = dset['conv1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn_conv1']
a = dset['bn_conv1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
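    # The blocks below all repeat one pattern: read a conv kernel/bias and the
    # matching BN beta/gamma/mean/variance, then map them onto target variable
    # names. A hedged helper sketch of that pattern (added for illustration
    # only; it is not called by the original mapping code and simply restates
    # the folding expression already used above):
    def _fold_conv_bn(conv_name, bn_name):
        conv = fff[conv_name][conv_name]
        bn = fff[bn_name][bn_name]
        kernel = np.array(conv['kernel:0'], dtype=np.float32)
        bias = np.array(conv['bias:0'], dtype=np.float32)
        beta = np.array(bn['beta:0'], dtype=np.float32)
        gamma = np.array(bn['gamma:0'], dtype=np.float32)
        mean = np.array(bn['moving_mean:0'], dtype=np.float32)
        var = np.array(bn['moving_variance:0'], dtype=np.float32)
        folded_beta = ((bias - mean) / var) * gamma + beta
        return kernel, gamma, folded_beta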
########res2########
dset = fff['res2a_branch1']
a = dset['res2a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch1']
a = dset['bn2a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2a']
a = dset['res2a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2a']
a = dset['bn2a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2b']
a = dset['res2a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2b']
a = dset['bn2a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2c']
a = dset['res2a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2c']
a = dset['bn2a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res2b_branch2a']
a = dset['res2b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2a']
a = dset['bn2b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2b']
a = dset['res2b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2b']
a = dset['bn2b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2c']
a = dset['res2b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2c']
a = dset['bn2b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res2c_branch2a']
a = dset['res2c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2a']
a = dset['bn2c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2b']
a = dset['res2c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2b']
a = dset['bn2c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2c']
a = dset['res2c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2c']
a = dset['bn2c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res3########
dset = fff['res3a_branch1']
a = dset['res3a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch1']
a = dset['bn3a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2a']
a = dset['res3a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2a']
a = dset['bn3a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2b']
a = dset['res3a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2b']
a = dset['bn3a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2c']
a = dset['res3a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2c']
a = dset['bn3a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res3b_branch2a']
a = dset['res3b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2a']
a = dset['bn3b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2b']
a = dset['res3b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2b']
a = dset['bn3b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2c']
a = dset['res3b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2c']
a = dset['bn3b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3c_branch2a']
a = dset['res3c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2a']
a = dset['bn3c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2b']
a = dset['res3c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2b']
a = dset['bn3c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2c']
a = dset['res3c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2c']
a = dset['bn3c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3d_branch2a']
a = dset['res3d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2a']
a = dset['bn3d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2b']
a = dset['res3d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2b']
a = dset['bn3d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2c']
a = dset['res3d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2c']
a = dset['bn3d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res4########
dset = fff['res4a_branch1']
a = dset['res4a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch1']
a = dset['bn4a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2a']
a = dset['res4a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2a']
a = dset['bn4a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2b']
a = dset['res4a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2b']
a = dset['bn4a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2c']
a = dset['res4a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2c']
a = dset['bn4a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res4b_branch2a']
a = dset['res4b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2a']
a = dset['bn4b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2b']
a = dset['res4b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2b']
a = dset['bn4b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2c']
a = dset['res4b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2c']
a = dset['bn4b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4c_branch2a']
a = dset['res4c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2a']
a = dset['bn4c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2b']
a = dset['res4c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2b']
a = dset['bn4c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2c']
a = dset['res4c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2c']
a = dset['bn4c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4d_branch2a']
a = dset['res4d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2a']
a = dset['bn4d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2b']
a = dset['res4d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2b']
a = dset['bn4d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2c']
a = dset['res4d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2c']
a = dset['bn4d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4e_branch2a']
a = dset['res4e_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2a']
a = dset['bn4e_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2b']
a = dset['res4e_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2b']
a = dset['bn4e_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2c']
a = dset['res4e_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2c']
a = dset['bn4e_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4f_branch2a']
a = dset['res4f_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2a']
a = dset['bn4f_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2b']
a = dset['res4f_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2b']
a = dset['bn4f_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2c']
a = dset['res4f_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2c']
a = dset['bn4f_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4g_branch2a']
a = dset['res4g_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2a']
a = dset['bn4g_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2b']
a = dset['res4g_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2b']
a = dset['bn4g_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2c']
a = dset['res4g_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2c']
a = dset['bn4g_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4h_branch2a']
a = dset['res4h_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2a']
a = dset['bn4h_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2b']
a = dset['res4h_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2b']
a = dset['bn4h_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2c']
a = dset['res4h_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2c']
a = dset['bn4h_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4i_branch2a']
a = dset['res4i_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2a']
a = dset['bn4i_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2b']
a = dset['res4i_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2b']
a = dset['bn4i_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2c']
a = dset['res4i_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2c']
a = dset['bn4i_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4j_branch2a']
a = dset['res4j_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2a']
a = dset['bn4j_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2b']
a = dset['res4j_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2b']
a = dset['bn4j_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2c']
a = dset['res4j_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2c']
a = dset['bn4j_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4k_branch2a']
a = dset['res4k_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2a']
a = dset['bn4k_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2b']
a = dset['res4k_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2b']
a = dset['bn4k_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2c']
a = dset['res4k_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2c']
a = dset['bn4k_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4l_branch2a']
a = dset['res4l_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2a']
a = dset['bn4l_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2b']
a = dset['res4l_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2b']
a = dset['bn4l_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2c']
a = dset['res4l_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2c']
a = dset['bn4l_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4m_branch2a']
a = dset['res4m_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2a']
a = dset['bn4m_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2b']
a = dset['res4m_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2b']
a = dset['bn4m_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2c']
a = dset['res4m_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2c']
a = dset['bn4m_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4n_branch2a']
a = dset['res4n_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2a']
a = dset['bn4n_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4n_branch2b']
a = dset['res4n_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2b']
a = dset['bn4n_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4n_branch2c']
a = dset['res4n_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2c']
a = dset['bn4n_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4o_branch2a']
a = dset['res4o_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4o_branch2a']
a = dset['bn4o_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4o_branch2b']
a = dset['res4o_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4o_branch2b']
a = dset['bn4o_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4o_branch2c']
a = dset['res4o_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4o_branch2c']
a = dset['bn4o_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4p_branch2a']
a = dset['res4p_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4p_branch2a']
a = dset['bn4p_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4p_branch2b']
a = dset['res4p_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4p_branch2b']
a = dset['bn4p_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4p_branch2c']
a = dset['res4p_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4p_branch2c']
a = dset['bn4p_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4q_branch2a']
a = dset['res4q_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4q_branch2a']
a = dset['bn4q_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4q_branch2b']
a = dset['res4q_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4q_branch2b']
a = dset['bn4q_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4q_branch2c']
a = dset['res4q_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4q_branch2c']
a = dset['bn4q_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4r_branch2a']
a = dset['res4r_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4r_branch2a']
a = dset['bn4r_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4r_branch2b']
a = dset['res4r_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4r_branch2b']
a = dset['bn4r_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4r_branch2c']
a = dset['res4r_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4r_branch2c']
a = dset['bn4r_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4s_branch2a']
a = dset['res4s_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4s_branch2a']
a = dset['bn4s_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4s_branch2b']
a = dset['res4s_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4s_branch2b']
a = dset['bn4s_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4s_branch2c']
a = dset['res4s_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4s_branch2c']
a = dset['bn4s_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4t_branch2a']
a = dset['res4t_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4t_branch2a']
a = dset['bn4t_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4t_branch2b']
a = dset['res4t_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4t_branch2b']
a = dset['bn4t_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4t_branch2c']
a = dset['res4t_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4t_branch2c']
a = dset['bn4t_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4u_branch2a']
a = dset['res4u_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
import math
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.compat.v1.keras import backend as K
from sklearn.preprocessing import MinMaxScaler
'''
Author: <NAME>
This script implements a ConvLSTM neural network that predicts sea ice concentration (SIC) per pixel and per month from spatio-temporal image data.
'''
# Data loading
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Rolling Data/X_train_rolling_filled_final.npy", "rb") as f:
X_train = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Rolling Data/y_train_rolling_filled_final.npy", "rb") as f:
y_train = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Rolling Data/X_test_rolling_filled_final.npy", "rb") as f:
X_test = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Rolling Data/y_test_rolling_filled_final.npy", "rb") as f:
y_test = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/modeling/y_land_mask_actual.npy", "rb") as f:
y_land_mask = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/y_extent_train_rolling_final.npy", "rb") as f:
y_extent_train = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/y_extent_test_rolling_final.npy", "rb") as f:
y_extent_test = np.load(f)
# reshape y_land_mask
y_land_mask = y_land_mask.reshape(448, 304, 1)
from sklearn.metrics import mean_squared_error
# define custom mse loss, which applies land mask to each output of the network.
def custom_mse(y_true, y_pred):
y_pred_masked = tf.math.multiply(y_pred, y_land_mask)
y_true_masked = tf.math.multiply(y_true, y_land_mask)
squared_resids = tf.square(y_true_masked - y_pred_masked)
loss = tf.reduce_mean(squared_resids)
return loss
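# Note on the masking above: y_pred and y_true have shape (batch, 448, 304, 1)
# while y_land_mask has shape (448, 304, 1), so the multiply broadcasts the
# same land mask over every sample in the batch. Assuming the mask is 1 over
# ocean and 0 over land, land pixels contribute zero to the squared residuals.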
#load per-pixel area file
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/area_size.npy", "rb") as f:
areas = np.load(f)
#calculate ice extent
def calc_ice_extent(array):
array = array / 100.0
# use simple extent calculation
tot_ice_extent = np.sum(np.multiply(array > 15.0, areas), axis=(1,2)) / 1e6
return tot_ice_extent
# define ConvLSTM model
def create_convLSTM_image():
#add ConvLSTM layers
inputs = keras.layers.Input(shape=X_train.shape[1:])
x = keras.layers.ConvLSTM2D(16, (5,5), padding="same", input_shape = (12, 448, 304, 11), return_sequences=False,
activation="relu", data_format = 'channels_last')(inputs)
x = keras.layers.MaxPooling2D((2,2), padding='same')(x)
x = keras.layers.Conv2D(128, (5,5), padding="same", activation="relu")(x)
x = keras.layers.MaxPooling2D((2,2), padding='same')(x)
x = keras.layers.Conv2D(32, (5,5), padding="same", activation="relu")(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(256, activation="relu")(x)
x = keras.layers.Dense(512, activation="relu")(x)
x = keras.layers.Dense(448*304, activation="linear")(x)
sic_output = keras.layers.Reshape((448, 304, 1), input_shape = (448*304,))(x)
#initialize model
model = keras.models.Model(inputs=inputs,
outputs=sic_output,
name="SIC_net")
#compile model
model.compile(optimizer="adamax", loss=custom_mse, metrics=[keras.metrics.RootMeanSquaredError()])
return model
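# Shape summary for the network defined above: the input is a 12-step (monthly)
# sequence of 448 x 304 grids with 11 channels, i.e. (batch, 12, 448, 304, 11)
# as stated in the ConvLSTM2D input_shape, and the single output is one
# predicted SIC map of shape (batch, 448, 304, 1) produced by the final
# Dense(448*304) layer followed by the Reshape.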
# define loss weights for each output
sample_weight = np.ones(shape=(len(y_train),))
train_extent = calc_ice_extent(y_train)
sample_weight[9::12]=1.2
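# The assignment above upweights every 12th training sample (indices 9, 21,
# ...), i.e. one month per year, by a factor of 1.2 in the loss; all other
# samples keep weight 1.0.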
print("first 5 sample weights: {}".format(sample_weight[0:5]))
# define early stopping callback
early_stopping = keras.callbacks.EarlyStopping(patience=100, restore_best_weights=True)
# call model creation
convLSTM_image = create_convLSTM_image()
# print model summary
print(convLSTM_image.summary())
# fit model
history2 = convLSTM_image.fit(x=X_train, y=y_train,
batch_size=4,
epochs=1000,
validation_split = .2,
sample_weight=sample_weight,
callbacks=[early_stopping])
# save model
convLSTM_image.save("convLSTM_image")
# image output
# train prediction
image_train_preds = convLSTM_image.predict(X_train)
image_train_rmse = math.sqrt(mean_squared_error(y_train.flatten(), image_train_preds.flatten()))
print("Image Concentration Train RMSE: {}".format(image_train_rmse))
print("Image Concentration Train NRMSE: {}".format(image_train_rmse / np.mean(y_train)))
print("Image Concentration Train NRMSE (std. dev.): {}".format(image_train_rmse / np.std(y_train)))
print("Train Prediction Shape: {}".format(image_train_preds.shape))
# test prediction
image_test_preds = convLSTM_image.predict(X_test)
image_test_rmse = math.sqrt(mean_squared_error(y_test.flatten(), image_test_preds.flatten()))
print("Image Concentration Test RMSE: {}".format(image_test_rmse))
print("Image Concentration Test NRMSE: {}".format(image_test_rmse / np.mean(y_test)))
print("Image Concentration Test NRMSE (std. dev.): {}".format(image_test_rmse / np.std(y_test)))
print("Test Prediction Shape: {}".format(image_test_preds.shape))
# save image outputs:
print(image_test_preds.shape)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/evaluation/convlstm/convlstm_image_rolling_filled_preds.npy", "wb") as f:
np.save(f, image_test_preds)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/evaluation/convlstm/convlstm_image_rolling_filled_actual.npy", "wb") as f:
np.save(f, y_test)
# calculate predicted extent
# train
train_pred_extent = calc_ice_extent(image_train_preds)
train_actual_extent = calc_ice_extent(y_train)
train_extent_rmse = math.sqrt(mean_squared_error(train_actual_extent, train_pred_extent))
print("Last Month Predicted Extent(Train): {}".format(train_pred_extent[-1]))
print("Last Month Actual Extent (Train): {}".format(train_actual_extent[-1]))
print("Train Extent RMSE: {}".format(train_extent_rmse))
print("Train Extent NRMSE: {}".format(train_extent_rmse / np.mean(train_actual_extent)))
import numpy as np
from . import VecEnv
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
self.action_space = env.action_space
self.observation_space = env.observation_space
self.ts = np.zeros(len(self.envs), dtype='int')
def step(self, action_n):
results = [env.step(a) for (a,env) in zip(action_n, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
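        # Auto-reset: when an environment reports done, replace its final
        # observation with the first observation of a fresh episode and
        # restart its timestep counter, so the caller always receives a
        # valid next observation.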
for (i, done) in enumerate(dones):
if done:
obs[i] = self.envs[i].reset()
self.ts[i] = 0
        return np.array(obs), np.array(rews), np.array(dones), infos
import numpy as np
import matplotlib.pyplot as plt
import btk
import matplotlib.patches as patches
def get_rgb(image, min_val=None, max_val=None):
"""Function to normalize 3 band input image to RGB 0-255 image.
Args:
image (array_like): Image array to convert to RGB image with dtype
uint8 [bands, height, width].
min_val (float32 or 3-d float array, default=`None`): Pixel values in
image less than or equal to this are set to zero in the RGB output.
max_val (float32, default=`None`): Pixel values in image greater than
or equal to this are set to zero in the RGB output.
Returns:
uint8 array [height, width, bands] of the input image.
"""
if image.shape[0] != 3:
        raise ValueError("Must be 3 channels in dimension 1 of image. "
                         f"Found {image.shape[0]}")
if min_val is None:
min_val = image.min(axis=-1).min(axis=-1)
if max_val is None:
max_val = image.max(axis=-1).max(axis=-1)
new_image = np.transpose(image, axes=(1, 2, 0))
new_image = (new_image - min_val) / (max_val - min_val) * 255
new_image[new_image < 0] = 0
new_image[new_image > 255] = 255
return new_image.astype(np.uint8)
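# In get_rgb above, each band is scaled independently: a pixel equal to that
# band's min_val maps to 0 and a pixel equal to max_val maps to 255, with
# values outside that range clipped. For example, with min_val=0 and
# max_val=10, a pixel value of 5 maps to 127 after the uint8 cast.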
def get_rgb_image(image, normalize_with_image=None):
"""Returns RGB (0-255) image corresponding to the input 3 band image.
If scarlet.display is imported then the normalization is performed by
scarlet Asinh function. If not, a basic normalization is performed.
Args:
image : Image array (float32) to convert to RGB [bands, height, width].
normalize_with_image: Image array (float32) to normalize input image
with [bands, height, width].
Returns:
uint8 array [height, width, bands] of the input image.
"""
try:
import scarlet.display
if normalize_with_image is not None:
norm = scarlet.display.Asinh(img=normalize_with_image, Q=20)
else:
norm = None
img_rgb = scarlet.display.img_to_rgb(image, norm=norm)
except ImportError:
# scarlet not installed, basic normalize image to 0-255
if normalize_with_image is None:
min_val = None
max_val = None
else:
min_val = np.min(normalize_with_image, axis=1).min(axis=-1)
max_val = np.max(normalize_with_image, axis=1).max(axis=-1)
img_rgb = get_rgb(image, min_val=min_val, max_val=max_val)
return img_rgb
def plot_blends(blend_images, blend_list, detected_centers=None,
limits=None, band_indices=[1, 2, 3]):
"""Plots blend images as RGB image, sum in all bands, and RGB image with
centers of objects marked.
Outputs of btk draw are plotted here. Blend_list must contain true centers
of the objects. If detected_centers are input, then the centers are also
shown in the third panel along with the true centers.
Args:
blend_images (array_like): Array of blend scene images to plot
[batch, height, width, bands].
blend_list (list) : List of `astropy.table.Table` with entries of true
objects. Length of list must be the batch size.
detected_centers (list, default=`None`): List of `numpy.ndarray` or
lists with centers of detected centers for each image in batch.
Length of list must be the batch size. Each list entry must be a
list or `numpy.ndarray` of dimensions [N, 2].
limits(list, default=`None`): List of start and end coordinates to
display image within. Note: limits are applied to both height and
width dimensions.
band_indices (list, default=[1,2,3]): list of length 3 with indices of
bands that are to be plotted in the RGB image.
"""
batch_size = len(blend_list)
if len(band_indices) != 3:
raise ValueError(f"band_indices must be a list with 3 entries, not \
{band_indices}")
if detected_centers is None:
detected_centers = [[]] * batch_size
if (len(detected_centers) != batch_size or
blend_images.shape[0] != batch_size):
raise ValueError(f"Length of detected_centers and length of blend_list\
must be equal to first dimension of blend_images, found \
{len(detected_centers), len(blend_list), len(blend_images)}")
for i in range(batch_size):
num = len(blend_list[i])
images = np.transpose(blend_images[i],
axes=(2, 0, 1))
blend_img_rgb = get_rgb_image(images[band_indices])
_, ax = plt.subplots(1, 3, figsize=(8, 3))
ax[0].imshow(blend_img_rgb)
if limits:
ax[0].set_xlim(limits)
ax[0].set_ylim(limits)
ax[0].set_title("gri bands")
ax[0].axis('off')
ax[1].imshow(np.sum(blend_images[i, :, :, :], axis=2))
ax[1].set_title("Sum")
if limits:
ax[1].set_xlim(limits)
ax[1].set_ylim(limits)
ax[1].axis('off')
ax[2].imshow(blend_img_rgb)
ax[2].set_title(f"{num} objects with centers")
for entry in blend_list[i]:
ax[2].plot(entry['dx'], entry['dy'], 'rx')
if limits:
ax[2].set_xlim(limits)
ax[2].set_ylim(limits)
for cent in detected_centers[i]:
ax[2].plot(cent[0], cent[1], 'go', fillstyle='none', ms=10, mew=2)
ax[2].axis('off')
plt.show()
def plot_with_isolated(blend_images, isolated_images, blend_list,
detected_centers=None, limits=None,
band_indices=[1, 2, 3]):
"""Plots blend images and isolated images of all objects in the blend as
RGB images.
Outputs of btk draw are plotted here. Blend_list must contain true centers
of the objects. If detected_centers are input, then the centers are also
shown in the third panel along with the true centers.
Args:
blend_images(array_like): Array of blend scene images to plot
[batch, height, width, bands].
isolated_images (array_like): Array of isolated object images to plot
[batch, max number of objects, height, width, bands].
blend_list(list) : List of `astropy.table.Table` with entries of true
objects. Length of list must be the batch size.
detected_centers(list, default=`None`): List of `numpy.ndarray` or
lists with centers of detected centers for each image in batch.
Length of list must be the batch size. Each list entry must be a
list or `numpy.ndarray` of dimensions [N, 2].
limits(list, default=`None`): List of start and end coordinates to
display image within. Note: limits are applied to both height and
width dimensions.
band_indices (list, default=[1,2,3]): list of length 3 with indices of
bands that are to be plotted in the RGB image.
"""
b_size = len(blend_list)
if len(band_indices) != 3:
raise ValueError(f"band_indices must be a list with 3 entries, not \
{band_indices}")
if detected_centers is None:
detected_centers = [[]] * b_size
if (len(detected_centers) != b_size or len(isolated_images) != b_size or
blend_images.shape[0] != b_size):
raise ValueError(f"Length of detected_centers and length of blend_list\
must be equal to first dimension of blend_images, found \
{len(detected_centers), len(blend_list), len(blend_images)}")
for i in range(len(blend_list)):
images = np.transpose(blend_images[i], axes=(2, 0, 1))
blend_img_rgb = get_rgb_image(
images[band_indices],
normalize_with_image=images[band_indices])
plt.figure(figsize=(2, 2))
plt.imshow(blend_img_rgb)
plt.title(f"{len(blend_list[i])} objects")
if limits:
plt.xlim(limits)
plt.ylim(limits)
plt.axis('off')
for cent in detected_centers[i]:
plt.plot(cent[0], cent[1], 'go', fillstyle='none')
plt.show()
iso_blend = isolated_images[i]
num = iso_blend.shape[0]
plt.figure(figsize=(2 * num, 2))
for j in range(num):
iso_images = np.transpose(iso_blend[j], axes=(2, 0, 1))
iso_img_rgb = get_rgb_image(
iso_images[band_indices],
normalize_with_image=images[band_indices])
plt.subplot(1, num, j + 1)
plt.imshow(iso_img_rgb)
if limits:
plt.xlim(limits)
plt.ylim(limits)
plt.axis('off')
if len(detected_centers[i]) > 0:
plt.plot(detected_centers[i][j][0], detected_centers[i][j][1],
'go', fillstyle='none')
plt.show()
def plot_cumulative(table, column_name, ax=None, bins=40,
color='red', label=None, xlabel=None):
"""Plot cumulative counts of input column_name in table.
Args:
table(`astropy.table.Table`) : Catalog with features as columns and
different samples at rows.
column_name(str): Name of column in input table who's cumulative
counts are to be plotted.
ax(`matplotlib.axes`, default=`None`): Matplotlib axis on which to draw
the plot. If not provided, one is created inside.
bins(int or sequence of scalars, optional, default=40): If bins is an
int, it defines the number of equal-width bins in the given range
(40, by default). If bins is a sequence, it defines a monotonically
increasing array of bin edges, including the rightmost edge,
allowing for non-uniform bin widths.
color(str, default='red'): Color of cumulative counts curve.
label(str, default=`None`): label for legend in plot.
xlabel(str, default=`None`): x-axis label in plot. If not provided,
then the column_name is set as x-axis label.
"""
if xlabel is None:
xlabel = column_name
det_values, det_base = np.histogram(table[column_name], bins=bins)
det_cumulative = np.cumsum(det_values)
if label is None:
ax.plot(det_base[:-1], det_cumulative, c=color)
else:
ax.plot(det_base[:-1], det_cumulative, c=color, label=label)
ax.set_xlabel(xlabel)
ax.set_ylabel('Cumulative counts')
def plot_metrics_summary(summary, num, ax=None):
"""Plot detection summary as a matrix of detection efficiency.
Input argument num sets the maximum number of true detections for which the
detection efficiency matrix is to be created for. Detection efficiency is
computed for number of true objects in the range (1-num)
Args:
summary(`numpy.array`) : Detection summary as a table [N, 5].
num(int): Maximum number of true objects to create matrix for. Number
of columns in matrix will be num-1.
ax(`matplotlib.axes`, default=`None`): Matplotlib axis on which to draw
the plot. If not provided, one is created inside.
"""
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(5, 5))
plt.subplots_adjust(wspace=0.7, hspace=0.3)
results_table = btk.utils.get_detection_eff_matrix(summary, num)
ax.imshow(results_table, origin='left', cmap=plt.cm.Blues)
ax.set_xlabel("# true objects")
# Don't print zero'th column
ax.set_xlim([0.5, num + 0.5])
ax.set_ylabel("# correctly detected objects")
ax.set_xticks(np.arange(1, num + 1, 1.0))
ax.set_yticks(np.arange(0, num + 2, 1.0))
for (j, i), label in | np.ndenumerate(results_table) | numpy.ndenumerate |
import numpy as np
import scipy.stats
import sys
def idx_to_rank_sum(idx, n):
"""Map from an integer idx to a value in the support of
the Wilcoxon test statistic.
Specifically, if S is the support of the test statistic,
then this is a map from
{0, ..., |S| - 1} -> S
Parameters
----------
idx : integer
An integer from {0, ..., |S| - 1}.
n : integer
The number of paired samples.
Returns
-------
rank_sum : integer
The rank_sum corresponding to idx.
Examples
--------
If we have n = 3 paired samples, then the
map is given by
idxes = { 0 1 2 3 4 5 6 }
| | | | | | |
S = { -6 -4 -2 0 2 4 6 }
"""
support_size = int(n*(n+1)/2 + 1)
if idx >= support_size:
raise ValueError("Outside of support")
shift = 1 if support_size % 2 == 0 else 0
middle = (support_size-1)/2
rank_sum = 2*(idx-np.floor(middle)) - shift
rank_sum = int(rank_sum)
return rank_sum
def rank_sum_to_idx(rank_sum, n):
"""Map from a rank sum in the support of the Wilcoxon
test statistic to an integer idx.
This is the inverse mapping of idx_to_rank_sum.
Specifically, if S is the support of the test statistic,
then this is a map from
S -> {0, ..., |S| - 1}
Parameters
----------
rank_sum : integer
An integer in the support of the test statistic
n : integer
The number of paired samples.
Returns
-------
idx : integer
The idx in the support corresponding to rank_sum.
Examples
--------
If we have n = 3 paired samples, then the
map is given by
S = { -6 -4 -2 0 2 4 6 }
| | | | | | |
idxes = { 0 1 2 3 4 5 6 }
"""
if rank_sum < -n*(n+1)/2 or rank_sum > n*(n+1)/2:
raise ValueError("Outside of support")
support_size = int(n*(n+1)/2 + 1)
shift = 1 if support_size % 2 == 0 else 0
middle = (support_size-1)/2
for i in range(int(support_size)):
if rank_sum == 2*(i-np.floor(middle)) - shift:
return i
def compute_counts(n):
"""Recursively counts the coefficients of each term in the mgf.
The mgf is given by
mgf_X(t) = 1/2^n \prod_{j=1}^n (e^{-tj} + e^{tj})
= 1/2^n \sum_{j=1}^n c_j e^{tj}
= \sum_{j=1}^n Pr(X = j) e^{tj}
This function computes c_j for a number of paired samples n.
By expanding and collecting e^{tj} for each j, we can compute
the pmf of the Wilcoxon test statistic. We can do this recursively
by noticing that
1/2^n \prod_{j=1}^n (e^{-tj} + e^{tj})
= 1/2^n (e^{-nt} + e^{nt}) \prod_{j=1}^{n-1} (e^{-tj} + e^{tj})
Thus, if we have the coefficients c_j for n-1, we can compute the
coefficients c_j for n by expanding the bottom equation and counting
e^{tj} for each j.
References
----------
[1] Hogg, <NAME>., <NAME>, and <NAME>. Introduction
to Mathematical Statistics. 7th Edition. pp. 541-544.
"""
if n == 1:
counts = np.array([1, 1])
return counts
else:
counts = compute_counts(n-1)
support_size = int(n*(n+1)/2 + 1)
new_counts = np.zeros(support_size)
for i in range(counts.size):
rank_sum = idx_to_rank_sum(i, n-1)
new_rank_sum1 = rank_sum + n
new_rank_sum2 = rank_sum - n
min_rank = -n*(n+1)/2
max_rank = n*(n+1)/2
if counts[i] > 0:
if new_rank_sum1 <= max_rank:
new_counts[rank_sum_to_idx(new_rank_sum1, n)] += counts[i]
if new_rank_sum2 >= min_rank:
new_counts[rank_sum_to_idx(new_rank_sum2, n)] += counts[i]
return new_counts
def compute_pmf(n):
"""Compute the support and pmf given n paired samples.
Parameters
----------
n : integer
The number of paired samples
Returns
-------
support : numpy array
An array giving the support of the pmf
pmf : numpy array
An array giving the probability of each integer in the support
"""
support_size = int(n*(n+1)/2 + 1)
support = np.array([idx_to_rank_sum(i, n) for i in range(support_size)])
pmf = compute_counts(n)/np.power(2,n)
assert np.abs(pmf.sum() - 1) < 1E-8, pmf.sum()
return support, pmf
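# Hedged sanity-check sketch (illustrative addition, not part of the original
# module): for n = 3 the signed-rank sum over {+-1, +-2, +-3} takes the values
# -6, -4, -2, 0, 2, 4, 6 with counts 1, 1, 1, 2, 1, 1, 1 out of 2**3 = 8 equally
# likely sign patterns, so compute_pmf should reproduce exactly these probabilities.
def _demo_wilcoxon_pmf_n3():
    support, pmf = compute_pmf(3)
    assert list(support) == [-6, -4, -2, 0, 2, 4, 6]
    expected = np.array([1, 1, 1, 2, 1, 1, 1]) / 8.0
    assert np.allclose(pmf, expected)
    return support, pmf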
def wilcoxon_exact(x, y=None, alternative="two-sided"):
"""
Calculate the Wilcoxon signed-rank test statistic and exact p-values.
Given matched samples, x_i and y_i, the Wilcoxon signed-rank test tests the
null that x_i - y_i is symmetric around zero. In practice, it is used to test
whether x_i and y_i are from the same population with different location
parameters.
There are several different versions of the test statistic. The one used here
is
T = sign(z_1) R|z_1| + ... + sign(z_n) R|z_n|
where
z_i = x_i - y_i if y_i is specified
z_i = x_i otherwise.
The pmf has no closed form, but for small sample sizes it is possible to compute
the pmf by solving for the coefficients of the moment generating function.
Parameters
----------
x : array_like
The first set of data values (if y is specified) or the difference
between two sets of data values
y : array_like optional
If specified, the difference x - y is used for the test statistic
alternative : {"two-sided", "greater", "less"}, optional
The alternative hypothesis tested.
If "two-sided", the test is
x_i - y_i > 0 or x_i - y_i < 0
If "greater", the test it
x_i - y_i > 0
If "less", the test is
x_i - y_i < 0
Returns
-------
T : float
The test-statistic.
p : float
The p-value.
Examples
--------
>>> import numpy as np
>>> from wilcoxon_exact import wilcoxon_exact
>>> x = np.array([1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30])
>>> y = np.array([0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29])
>>> wilcoxon_exact(x, y, alternative="greater")
(35.0, 0.01953125)
>>> x = np.array([-6.1, 4.3, 7.2, 8.0, -2.1])
>>> wilcoxon_exact(x, alternative="two-sided")
(7.0, 0.4375)
"""
if alternative not in ["two-sided", "less", "greater"]:
raise ValueError("Alternative must be either 'two-sided'", "'greater' or 'less'")
x = | np.array(x) | numpy.array |
"""
The pycity_scheduling framework
Copyright (C) 2022,
Institute for Automation of Complex Power Systems (ACS),
E.ON Energy Research Center (E.ON ERC),
RWTH Aachen University
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import unittest
import datetime
import logging
import warnings
import pyomo.environ as pyomo
from pyomo.opt import TerminationCondition
from shapely.geometry import Point
from pycity_scheduling import constants, solvers
from pycity_scheduling.classes import *
from pycity_scheduling.util.metric import *
class TestModule(unittest.TestCase):
def test_filter_entities(self):
e = get_env(4, 8)
bd = Building(e)
bes = BuildingEnergySystem(e)
pv = Photovoltaic(e, 0)
bes.addDevice(pv)
bd.addEntity(bes)
def do_test(gen):
entities = list(gen)
self.assertEqual(1, len(entities))
self.assertIn(pv, entities)
do_test(filter_entities(bd.get_entities(), 'PV'))
do_test(filter_entities(bd, 'generation_devices'))
do_test(filter_entities(bd, [Photovoltaic]))
do_test(filter_entities(bd, ['PV']))
do_test(filter_entities(bd, {'PV': Photovoltaic}))
with self.assertRaises(ValueError):
next(filter_entities(bd, 'PPV'))
with self.assertRaises(ValueError):
next(filter_entities(bd, [int]))
with self.assertRaises(ValueError):
next(filter_entities(bd, None))
return
class TestBattery(unittest.TestCase):
def setUp(self):
e = get_env(3)
self.bat = Battery(e, 10, 20, soc_init=0.875, eta=0.5)
return
def test_populate_model(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
model.c1 = pyomo.Constraint(expr=self.bat.model.e_el_vars[2] == 10)
model.c2 = pyomo.Constraint(expr=self.bat.model.e_el_vars[0] == 5)
obj = pyomo.sum_product(self.bat.model.p_el_demand_vars, self.bat.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
result = solve_model(model)
# TODO stats are currently not correct due to a pyomo bug
# use result as a workaround
#model.compute_statistics()
#stats = model.statistics
#self.assertEqual(12, stats.number_of_variables)
self.assertEqual(13, result.Problem[0].number_of_variables)
var_sum = pyomo.value(pyomo.quicksum(self.bat.model.p_el_vars[t] for t in range(1, 3)))
self.assertAlmostEqual(40, var_sum, places=5)
var_sum = pyomo.value(pyomo.quicksum(
self.bat.model.p_el_supply_vars[t] + self.bat.model.p_el_demand_vars[t] for t in range(1, 3)
))
self.assertAlmostEqual(40, var_sum, places=5)
return
def test_update_model(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
demand_var = self.bat.model.p_el_vars
self.bat.update_model()
model.c1 = pyomo.Constraint(expr=self.bat.model.e_el_vars[0] == 10)
obj = pyomo.sum_product(demand_var, demand_var)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.assertAlmostEqual(10, pyomo.value(demand_var[0]), places=5)
return
def test_update_schedule(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
self.bat.update_model()
self.bat.model.p_el_demand_vars.setlb(3.0)
self.bat.model.p_el_demand_vars.setub(3.0)
self.bat.model.p_el_supply_vars.setlb(0.0)
self.bat.model.p_el_supply_vars.setub(0.0)
obj = pyomo.sum_product(self.bat.model.p_el_demand_vars, self.bat.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.bat.update_schedule()
assert_equal_array(self.bat.p_el_schedule, [3] * 3)
assert_equal_array(self.bat.e_el_schedule, 0.875 * 10 + np.arange(1, 4)*3*0.25*0.5)
return
def test_calculate_co2(self):
self.bat.p_el_schedule = np.array([10]*3)
self.assertEqual(0, calculate_co2(self.bat))
return
def test_get_objective(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
obj = self.bat.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
for t in range(3):
self.assertIn(self.bat.model.p_el_vars[t], vs)
self.bat.model.p_el_vars[t] = t * 5
self.assertEqual(3, len(vs))
self.assertEqual(sum(2*(5*t)**2 for t in range(3)), pyomo.value(obj))
return
def test_e_ini(self):
expected_schedule = list(range(4, 21, 2))
e = get_env(3, 9, 2)
model = pyomo.ConcreteModel()
bat = Battery(e, 20, 10, soc_init=0.1, eta=0.8)
bat.populate_model(model)
model.o = pyomo.Objective(expr=-bat.model.e_el_vars[2])
for t in range(4):
bat.update_model()
solve_model(model)
bat.update_schedule()
e.timer.mpc_update()
assert_equal_array(bat.e_el_schedule, expected_schedule[:3+t*2] + [0] * 2 * (3-t))
assert_equal_array(bat.p_el_schedule, [10] * (3 + t * 2) + [0] * 2 * (3 - t))
assert_equal_array(bat.p_el_demand_schedule, [10] * (3 + t * 2) + [0] * 2 * (3 - t))
assert_equal_array(bat.p_el_supply_schedule, [0] * 9)
return
def test_no_discharge(self):
e = get_env(9, 9)
model = pyomo.ConcreteModel()
bat = Battery(e, 30, 10, p_el_max_discharge=0, soc_init=0.5, eta=1)
bat.populate_model(model)
bat.update_model()
model.o = pyomo.Objective(expr=pyomo.sum_product(bat.model.p_el_vars))
solve_model(model)
bat.update_schedule()
assert_equal_array(bat.e_el_schedule, [15] * 9)
assert_equal_array(bat.p_el_schedule, [0] * 9)
assert_equal_array(bat.p_el_demand_schedule, [0] * 9)
assert_equal_array(bat.p_el_supply_schedule, [0] * 9)
return
class TestBoiler(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.bl = Boiler(e, 10, 0.4)
return
def test_calculate_co2(self):
self.bl.p_th_heat_schedule = - np.array([10] * 8)
self.bl.p_th_heat_ref_schedule = - | np.array([4] * 8) | numpy.array |
# Imports
from scipy.io import loadmat
from scipy.signal import fftconvolve
import numpy as np
import gc as garbageCollector
########################################################################################################################
# Load signals from a specific file in the source files
# Convenience function to load signals
def loadSignals(recordName, dataPath, dataInDirectory):
if dataInDirectory:
signals = loadmat(dataPath + recordName + '/' + recordName + '.mat')
else:
signals = loadmat(dataPath + recordName + '.mat')
signals = signals['val']
garbageCollector.collect()
return signals
########################################################################################################################
# Loads and preprocesses signals from a specific record
def extractWholeRecord(recordName,
dataPath='PATH/',
dataInDirectory=True):
# Keep all channels except ECG
keepChannels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
signals = loadSignals(recordName, dataPath, dataInDirectory)
signals = np.transpose(signals).astype(np.float64)
# Apply antialiasing FIR filter to each channel and downsample to 50Hz
filtCoeff = np.array([0.00637849379422531, 0.00543091599801427, -0.00255136650039784, -0.0123109503066702,
-0.0137267267561505, -0.000943230632358082, 0.0191919895027550, 0.0287148886882440,
0.0123598773891149, -0.0256928886371578, -0.0570987715759348, -0.0446385294777459,
0.0303553522906817, 0.148402006671856, 0.257171285176269, 0.301282456398562,
0.257171285176269, 0.148402006671856, 0.0303553522906817, -0.0446385294777459,
-0.0570987715759348, -0.0256928886371578, 0.0123598773891149, 0.0287148886882440,
0.0191919895027550, -0.000943230632358082, -0.0137267267561505, -0.0123109503066702,
-0.00255136650039784, 0.00543091599801427, 0.00637849379422531])
for n in range(signals.shape[1]):
signals[::, n] = np.convolve(signals[::, n], filtCoeff, mode='same')
signals = signals[0::4, keepChannels]
garbageCollector.collect()
# Scale SaO2 to sit between -0.5 and 0.5, a good range for input to neural network
signals[::, 11] += -32768.0
signals[::, 11] /= 65535.0
signals[::, 11] -= 0.5
    # Normalize all the other channels by removing the mean and the rms in an 18 minute rolling window, using fftconvolve for computational efficiency
    # An 18 minute window is used because baseline breathing is established in a 2 minute window according to AASM standards.
    # Normalizing over 18 minutes ensures a 90% overlap between the beginning and end of the baseline window
kernel_size = (50*18*60)+1
# Remove DC bias and scale for FFT convolution
center = | np.mean(signals, axis=0) | numpy.mean |
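# Hedged sketch (illustrative addition, not the original implementation): one way
# to realize the rolling mean/RMS normalization described in the comments above is
# a boxcar kernel applied per channel with fftconvolve. The helper name, the clip
# floor and the exact convention are assumptions.
def _rolling_normalize_sketch(signals, kernel_size):
    kernel = np.ones((kernel_size, 1), dtype=np.float64) / kernel_size
    center = np.mean(signals, axis=0)
    centered = signals - center
    # Local mean and local mean-square via FFT convolution, channel by channel
    local_mean = fftconvolve(centered, kernel, mode='same')
    local_ms = fftconvolve(centered ** 2, kernel, mode='same')
    local_rms = np.sqrt(np.clip(local_ms, 1e-12, None))
    return (centered - local_mean) / local_rms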
import numpy as np
from numpy import transpose as tp
import scipy.signal as sig
import scipy.stats as scistat
import filterbanks as fb
class SoundTexture(object):
"""
Based on <NAME>'s Matlab toolbox:
http://mcdermottlab.mit.edu/Sound_Texture_Synthesis_Toolbox_v1.7.zip
y = audio file
fs = sample rate
"""
def __init__(self, y, fs):
self.y = y
self.fs = fs
# default settings:
self.desired_rms = .01
self.audio_sr = 20000
self.n_audio_channels = 30
self.low_audio_f = 20
self.hi_audio_f = 10000
self.use_more_audio_filters = 0
self.lin_or_log_filters = 1
self.env_sr = 400
self.n_mod_channels = 20
self.low_mod_f = 0.5
self.hi_mod_f = 200
self.use_more_mod_filters = 0
self.mod_filt_Q_value = 2
self.use_zp = 0
self.low_mod_f_c12 = 1
self.compression_option = 1
self.comp_exponent = .3
self.log_constant = 10 ** -12
self.match_env_hist = 0
self.match_sub_hist = 0
self.n_hist_bins = 128
self.manual_mean_var_adjustment = 0
self.max_orig_dur_s = 7
self.desired_synth_dur_s = 5
self.measurement_windowing = 2
self.imposition_windowing = 1
self.win_steepness = .5
self.imposition_method = 1
self.sub_imposition_order = 1
self.env_ac_intervals_smp = np.array([1, 2, 3, 4, 5, 6, 7, 9, 11, 14, 18, 22, 28, 36, 45, 57, 73, 92, 116, 148, 187, 237, 301]) # in samples
self.sub_ac_undo_win = 1
self.sub_ac_win_choice = 2
self.num_sub_ac_period = 5
# allocate memory:
self.mod_c2 = []
self.mod_c1 = []
self.env_c = []
self.subband_ac = []
self.mod_power_center_freqs = []
self.mod_c2_center_freqs = []
self.mod_c1_center_freqs = []
self.audio_cutoffs_hz = []
self.subband_mean = np.zeros(self.n_audio_channels + 2)
self.subband_var = np.zeros(self.n_audio_channels + 2)
self.subband_skew = np.zeros(self.n_audio_channels + 2)
self.subband_kurt = np.zeros(self.n_audio_channels + 2)
self.env_mean = np.zeros(self.n_audio_channels + 2)
self.env_var = np.zeros(self.n_audio_channels + 2)
self.env_skew = np.zeros(self.n_audio_channels + 2)
self.env_kurt = np.zeros(self.n_audio_channels + 2)
self.subband_hist = np.zeros([self.n_audio_channels + 2 + 1, self.n_hist_bins])
self.subband_bins = np.zeros([self.n_audio_channels + 2 + 1, self.n_hist_bins])
self.env_hist = np.zeros([self.n_audio_channels + 2, self.n_hist_bins])
self.env_bins = np.zeros([self.n_audio_channels + 2, self.n_hist_bins])
self.env_ac = np.zeros([self.n_audio_channels + 2, self.env_ac_intervals_smp.shape[0]])
self.mod_power = np.zeros([self.n_audio_channels + 2, self.n_mod_channels])
self.subband_ac_power = np.zeros(self.n_audio_channels + 2)
# calculate stats:
self.orig_sound, self.ds_factor = self.format_orig_sound()
self.measurement_win = self.set_measurement_window(self.orig_sound.shape[0], self.measurement_windowing)
self.measure_texture_stats(self.orig_sound, self.measurement_win)
def format_orig_sound(self):
orig_sound = self.y
if orig_sound.ndim == 2:
orig_sound = (orig_sound[:, 0] + orig_sound[:, 1]) / 2 # if stereo convert to mono
if self.fs != self.audio_sr:
orig_sound = sig.resample(orig_sound, int(orig_sound.shape[0] * self.audio_sr / self.fs))
if np.remainder(orig_sound.shape[0], 2) == 1:
orig_sound = np.concatenate([orig_sound, np.array([0])])
ds_factor = self.audio_sr / self.env_sr
new_l = int(np.floor((orig_sound.shape[0] / ds_factor / 2) * ds_factor * 2))
orig_sound = orig_sound[:new_l]
orig_sound = orig_sound / np.sqrt(np.mean(np.square(orig_sound))) * self.desired_rms
return orig_sound, ds_factor
def set_measurement_window(self, sound_length, windowing_option):
if windowing_option == 1:
measurement_win = np.ones([int(sound_length / self.ds_factor), 1])
elif windowing_option == 2:
temp = self.make_windows_rcos_flat_no_ends(int(sound_length / self.ds_factor), int(np.round(sound_length / self.audio_sr)), self.win_steepness)
measurement_win = np.sum(temp, 1)
else:
raise Exception('measurement_win must be 1 or 2')
return measurement_win
@staticmethod
def make_windows_rcos_flat_no_ends(signal_length_smp, num_secs, ramp_prop):
num_secs = num_secs + 2
if ramp_prop == 0.5:
ramp_length_smp = int(np.floor(signal_length_smp / (num_secs - 1)))
flat_length_smp = 0
elif ramp_prop < 0.5:
flat_length = signal_length_smp / (num_secs * (1 - ramp_prop) / (1 - 2 * ramp_prop) - ramp_prop / (1 - 2 * ramp_prop))
ramp_length_smp = int(np.floor(flat_length * ramp_prop / (1 - 2 * ramp_prop)))
flat_length_smp = int(np.floor(flat_length))
else:
raise Exception('ramp_prop must be less than .5')
windows = np.zeros([signal_length_smp, num_secs])
windows[:flat_length_smp, 0] = 2
windows[flat_length_smp: flat_length_smp + ramp_length_smp, 0] = np.cos(np.linspace(1, ramp_length_smp, num=ramp_length_smp) / ramp_length_smp * np.pi) + 1
start_pt = flat_length_smp
for n in range(0, num_secs - 2):
windows[start_pt:start_pt+ramp_length_smp, n+1] = np.cos(np.linspace(-ramp_length_smp+1, 0, num=ramp_length_smp) / ramp_length_smp * np.pi) + 1
windows[start_pt+ramp_length_smp:start_pt+ramp_length_smp+flat_length_smp, n+1] = 2
windows[start_pt+ramp_length_smp+flat_length_smp:start_pt+2*ramp_length_smp+flat_length_smp, n+1] = np.cos(np.linspace(1, ramp_length_smp, num=ramp_length_smp) / ramp_length_smp * np.pi) + 1
start_pt = start_pt + flat_length_smp + ramp_length_smp
windows[start_pt:start_pt+ramp_length_smp, num_secs-1] = np.cos(np.linspace(-ramp_length_smp + 1, 0, num=ramp_length_smp) / ramp_length_smp * np.pi) + 1
windows[start_pt + ramp_length_smp:signal_length_smp, num_secs-1] = 2
windows = windows[:, 1:-1]
windows = windows / 2
return windows
@staticmethod
def stat_central_moment_win(x, n, win, x_mean=-99):
win = win / np.sum(win)
if x_mean == -99:
x_mean = np.sum(win * x)
if n == 1:
m = x_mean
elif n == 2:
m = np.sum(win * ((x - x_mean) ** 2))
m = np.sqrt(m) / x_mean
elif n == 3:
m2 = np.sum(win * ((x - x_mean) ** 2))
m = np.sum(win * ((x - x_mean) ** 3)) / (m2 ** (3.0 / 2.0))
elif n == 4:
m2 = np.sum(win * ((x - x_mean) ** 2))
m = np.sum(win * ((x - x_mean) ** 4)) / (m2 ** 2)
else:
raise Exception('input value of n not recognised')
return m
@staticmethod
def shift_s(s, num_samples):
if num_samples == 0:
new_s = s
elif num_samples < 0:
new_s = np.concatenate([s[-num_samples:], np.zeros(-num_samples)])
else:
new_s = np.concatenate([np.zeros(num_samples), s[:-num_samples]])
return new_s
def stat_env_ac_scaled_win(self, f_env, sample_spacing, use_zp, win):
if use_zp != 0:
raise Exception('zero padding not implemented')
win = win / np.sum(win)
ac_values = np.zeros(sample_spacing.shape[0])
for p in range(0, sample_spacing.shape[0]):
num_samp = sample_spacing[p]
meanf_env = np.mean(f_env[:, p])
mf_env = f_env[:, p] - meanf_env
env_var = np.mean(mf_env ** 2)
ac_values[p] = np.sum(win * (self.shift_s(mf_env, -num_samp) * self.shift_s(mf_env, num_samp))) / env_var
return ac_values
@staticmethod
def stat_var_win(s, win):
win = win / np.sum(win)
w_var = np.sum(win * (s - np.sum(win * s)) ** 2)
return w_var
def stat_mod_power_win(self, s, mod_subbands, use_zp, win):
if use_zp != 0:
raise Exception('zero padding not implemented')
win = win / np.sum(win)
s_var = self.stat_var_win(s, win)
mp = np.sum(np.dot(win[:, None], np.ones([1, mod_subbands.shape[1]])) * (mod_subbands ** 2), 0) / s_var
return mp
@staticmethod
def stat_mod_c2_win(subbands, use_zp, win):
if use_zp != 0:
raise Exception('zero padding not implemented')
win = win / np.sum(win)
analytic_subbands = np.transpose(sig.hilbert(np.transpose(subbands)))
n = analytic_subbands.shape[1]
c2 = np.zeros([n-1, 2])
for k in range(0, n-1):
c = (analytic_subbands[:, k] ** 2) / np.abs(analytic_subbands[:, k])
sig_cw = np.sqrt(np.sum(win * (np.real(c) ** 2)))
sig_fw = np.sqrt(np.sum(win * (np.real(analytic_subbands[:, k+1]) ** 2)))
c2[k, 0] = np.sum(win * np.real(c) * np.real(analytic_subbands[:, k+1])) / (sig_cw * sig_fw)
c2[k, 1] = np.sum(win * np.real(c) * np.imag(analytic_subbands[:, k + 1])) / (sig_cw * sig_fw)
return c2
@staticmethod
def stat_corr_filt_win_full(f_envs, use_zp, win):
if use_zp != 0:
raise Exception('zero padding not implemented')
win = win / np.sum(win)
cbc_value = np.zeros([f_envs.shape[1], f_envs.shape[1]])
meanf_envs = np.mean(f_envs, 0)[None, :]
mf_envs = f_envs - np.dot(np.ones([f_envs.shape[0], 1]), meanf_envs)
env_stds = np.sqrt(np.mean(mf_envs ** 2, 0))[None, :]
cbc_value[:, :] = np.dot(np.transpose((np.dot(win[:, None], np.ones([1, f_envs.shape[1]]))) * mf_envs), mf_envs) / np.dot(np.transpose(env_stds), env_stds)
return cbc_value
@staticmethod
def autocorr_mult(x):
xf = np.transpose(np.fft.fft(np.transpose(x)))
xf2 = | np.abs(xf) | numpy.abs |
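# Hedged sketch (illustrative addition, not part of the original toolbox): with a
# uniform window, the weighted central moment used by stat_central_moment_win for
# n = 2 reduces to the ordinary coefficient of variation (population std / mean).
# The helper below only demonstrates that correspondence with plain numpy.
def _demo_weighted_moment_uniform_window():
    rng = np.random.RandomState(0)
    x = rng.rand(1000) + 1.0
    win = np.ones_like(x)
    win = win / np.sum(win)
    x_mean = np.sum(win * x)
    m2 = np.sqrt(np.sum(win * (x - x_mean) ** 2)) / x_mean
    # Should match the usual coefficient of variation
    assert np.isclose(m2, np.std(x) / np.mean(x))
    return m2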
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import itertools
import warnings
import segyio
from os import path
import scipy
from cv_lib.utils import generate_path, mask_to_disk, image_to_disk
from matplotlib import pyplot as plt
from PIL import Image
# bugfix for scipy imports
import scipy.misc
import numpy as np
import torch
from toolz import curry
from torch.utils import data
import logging
from deepseismic_interpretation.dutchf3.utils.batch import (
interpolate_to_fit_data,
parse_labels_in_image,
get_coordinates_for_slice,
)
def _train_data_for(data_dir):
return path.join(data_dir, "train", "train_seismic.npy")
def _train_labels_for(data_dir):
return path.join(data_dir, "train", "train_labels.npy")
def _test1_data_for(data_dir):
return path.join(data_dir, "test_once", "test1_seismic.npy")
def _test1_labels_for(data_dir):
return path.join(data_dir, "test_once", "test1_labels.npy")
def _test2_data_for(data_dir):
return path.join(data_dir, "test_once", "test2_seismic.npy")
def _test2_labels_for(data_dir):
return path.join(data_dir, "test_once", "test2_labels.npy")
def read_labels(fname, data_info):
"""
Read labels from an image.
Args:
fname: filename of labelling mask (image)
data_info: dictionary describing the data
Returns:
list of labels and list of coordinates
"""
# Alternative writings for slice-type
inline_alias = ["inline", "in-line", "iline", "y"]
crossline_alias = ["crossline", "cross-line", "xline", "x"]
timeslice_alias = ["timeslice", "time-slice", "t", "z", "depthslice", "depth"]
label_imgs = []
label_coordinates = {}
# Find image files in folder
tmp = fname.split("/")[-1].split("_")
slice_type = tmp[0].lower()
tmp = tmp[1].split(".")
slice_no = int(tmp[0])
if slice_type not in inline_alias + crossline_alias + timeslice_alias:
print("File:", fname, "could not be loaded.", "Unknown slice type")
return None
if slice_type in inline_alias:
slice_type = "inline"
if slice_type in crossline_alias:
slice_type = "crossline"
if slice_type in timeslice_alias:
slice_type = "timeslice"
# Read file
print("Loading labels for", slice_type, slice_no, "with")
img = scipy.misc.imread(fname)
img = interpolate_to_fit_data(img, slice_type, slice_no, data_info)
label_img = parse_labels_in_image(img)
# Get coordinates for slice
coords = get_coordinates_for_slice(slice_type, slice_no, data_info)
# Loop through labels in label_img and append to label_coordinates
for cls in np.unique(label_img):
if cls > -1:
if str(cls) not in label_coordinates.keys():
label_coordinates[str(cls)] = np.array(np.zeros([3, 0]))
inds_with_cls = label_img == cls
cords_with_cls = coords[:, inds_with_cls.ravel()]
label_coordinates[str(cls)] = np.concatenate((label_coordinates[str(cls)], cords_with_cls), 1)
print(" ", str(np.sum(inds_with_cls)), "labels for class", str(cls))
if len(np.unique(label_img)) == 1:
print(" ", 0, "labels", str(cls))
# Add label_img to output
label_imgs.append([label_img, slice_type, slice_no])
return label_imgs, label_coordinates
class SectionLoader(data.Dataset):
"""
Base class for section data loader
:param config: configuration object to define other attributes in loaders
:param str split: split file to use for loading patches
:param bool is_transform: Transform patch to dimensions expected by PyTorch
:param list augmentations: Data augmentations to apply to patches
:param bool debug: enable debugging output
"""
def __init__(self, config, split="train", is_transform=True, augmentations=None, debug=False):
self.data_dir = config.DATASET.ROOT
self.n_classes = config.DATASET.NUM_CLASSES
self.MIN = config.DATASET.MIN
self.MAX = config.DATASET.MAX
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.sections = list()
self.debug = debug
def __len__(self):
return len(self.sections)
def __getitem__(self, index):
section_name = self.sections[index]
direction, number = section_name.split(sep="_")
if direction == "i":
im = self.seismic[int(number), :, :]
lbl = self.labels[int(number), :, :]
elif direction == "x":
im = self.seismic[:, int(number), :]
lbl = self.labels[:, int(number), :]
im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)
if self.debug and "test" in self.split:
outdir = f"debug/test/sectionLoader_{self.split}_raw"
generate_path(outdir)
path_prefix = f"{outdir}/index_{index}_section_{section_name}"
image_to_disk(im, path_prefix + "_img.png", self.MIN, self.MAX)
mask_to_disk(lbl, path_prefix + "_lbl.png", self.n_classes)
if self.augmentations is not None:
augmented_dict = self.augmentations(image=im, mask=lbl)
im, lbl = augmented_dict["image"], augmented_dict["mask"]
if self.is_transform:
im, lbl = self.transform(im, lbl)
if self.debug and "test" in self.split:
outdir = f"debug/test/sectionLoader_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
generate_path(outdir)
path_prefix = f"{outdir}/index_{index}_section_{section_name}"
image_to_disk( | np.array(im[0]) | numpy.array |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import matplotlib.patches as patches
import matplotlib.animation as animation
import matplotlib.markers as mmarkers
import numpy as np
from PIL import Image
from kivy.vector import Vector
from collections import Counter
import imgutils
class CarEnv(object):
def __init__(self, filename):
self.filename = filename
img = Image.open(self.filename).convert('L')
self.sand = np.asarray(img)/255
self.sand = self.sand.astype(int)
self.max_y, self.max_x = self.sand.shape
self.pos = Vector(int(self.max_x/2), int(self.max_y/2))
self.angle = Vector(10,0).angle(self.pos)
self.velocity = Vector(6, 0)
self.wall_padding = 5
self.max_angle = 20
self.max_action = [self.max_angle]
self.crop_size = 100
self.goal_iter = 0
self.goals = [Vector(1890, 150), Vector(140, 380)]
self.last_distance = 0
self.state_dim = (32, 3)
self.action_dim = (1,)
self._max_episode_steps = 5000
# track rewards distribution
self.rewards_distribution = Counter()
self.target = Vector(335, 178)
self.max_distance = 1574.
def seed(self, seed):
torch.manual_seed(seed)
np.random.seed(seed)
def reset(self):
self.angle = np.random.randint(low=-180, high=180)
onsand = True
while onsand:
self.pos.x = np.random.randint(low=self.wall_padding, high=self.max_x-self.wall_padding)
self.pos.y = np.random.randint(low=self.wall_padding, high=self.max_y-self.wall_padding)
if self.sand[int(self.pos.y),int(self.pos.x)] == 0:
onsand = False
self.velocity = Vector(0.5, 0).rotate(self.angle)
return self.get_state()
def random_action(self):
rotation = | np.random.uniform(low=-self.max_angle, high=self.max_angle) | numpy.random.uniform |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import utility_functions as utilfunc
import sys
import config
# Import from support function repo
import dispatch_functions as dFuncs
import tariff_functions as tFuncs
import decorators
np.seterr(divide='ignore', invalid='ignore')
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#%%
def calc_system_size_and_financial_performance(agent):
"""
This function accepts the characteristics of a single agent and
evaluates the financial performance of a set of solar+storage
system sizes. The system size with the highest NPV is selected.
Parameters
----------
agent : pandas.Series
Single agent (row) from an agent dataframe.
Returns
-------
pandas.Series
Agent with system size, business model and corresponding financial performance.
"""
#=========================================================================#
# Setup
#=========================================================================#
try:
in_cols = list(agent.index)
if config.VERBOSE:
logger.info(' ')
logger.info("\tRunning system size calculations for: {}, {}, {}".format(agent['state'], agent['tariff_class'], agent['sector_abbr']))
logger.info('real_discount: {}'.format(agent['discount_rate']))
logger.info('loan_rate: {}'.format(agent['loan_rate']))
logger.info('down_payment: {}'.format(agent['down_payment']))
# Set resolution of dispatcher
d_inc_n_est = 10
DP_inc_est = 12
d_inc_n_acc = 20
DP_inc_acc = 12
# Extract load profile
load_profile = np.array(agent['consumption_hourly'])
agent.loc['timesteps_per_year'] = 1
        # Extract PV capacity factor profile
pv_cf_profile = np.array(agent['solar_cf_profile']) / 1e3
agent['naep'] = float(np.sum(pv_cf_profile))
# Create battery object
batt = dFuncs.Battery()
batt_ratio = 3.0
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# Create export tariff object
if agent['nem_system_size_limit_kw'] != 0:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
original_bill, original_results = tFuncs.bill_calculator(load_profile, tariff, export_tariff)
if config.VERBOSE:
logger.info('original_bill: {}'.format(original_bill))
agent['first_year_elec_bill_without_system'] = original_bill * agent['elec_price_multiplier']
if config.VERBOSE:
logger.info('multiplied original bill: {}'.format(agent['first_year_elec_bill_without_system']))
if agent['first_year_elec_bill_without_system'] == 0:
agent['first_year_elec_bill_without_system']=1.0
agent['first_year_elec_cents_per_kwh_without_system'] = agent['first_year_elec_bill_without_system'] / agent['load_per_customer_in_bin_kwh']
#=========================================================================#
# Estimate bill savings revenue from a set of solar+storage system sizes
#=========================================================================#
max_size_load = agent.loc['load_per_customer_in_bin_kwh']/agent.loc['naep']
max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['developable_buildings_pct'] * agent.loc['pv_power_density_w_per_sqft']/1000.0
agent.loc['max_pv_size'] = min([max_size_load, max_size_roof, agent.loc['nem_system_size_limit_kw']])
if config.VERBOSE:
logger.info('max_size_load: {}'.format(max_size_load))
logger.info('max_size_roof: {}'.format(max_size_roof))
dynamic_sizing = True #False
if dynamic_sizing:
pv_sizes = np.arange(0, 1.1, 0.1) * agent.loc['max_pv_size']
else:
# Size the PV system depending on NEM availability, either to 95% of load w/NEM, or 50% w/o NEM. In both cases, roof size is a constraint.
if export_tariff.full_retail_nem==True:
pv_sizes = np.array([min(max_size_load * 0.95, max_size_roof)])
else:
pv_sizes = np.array([min(max_size_load * 0.5, max_size_roof)])
batt_powers = np.zeros(1)
# Calculate the estimation parameters for each PV size
est_params_df = pd.DataFrame(index=pv_sizes)
est_params_df['estimator_params'] = 'temp'
for pv_size in pv_sizes:
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
est_params_df.at[pv_size, 'estimator_params'] = dFuncs.calc_estimator_params(load_and_pv_profile, tariff, export_tariff, batt.eta_charge, batt.eta_discharge)
# Create df with all combinations of solar+storage sizes
system_df = pd.DataFrame(dFuncs.cartesian([pv_sizes, batt_powers]), columns=['pv', 'batt_kw'])
system_df['est_bills'] = None
pv_kwh_by_year = np.array([sum(x) for x in np.split(np.array(pv_cf_profile), agent.loc['timesteps_per_year'])])
pv_kwh_by_year = np.concatenate([(pv_kwh_by_year - ( pv_kwh_by_year * agent.loc['pv_deg'] * i)) for i in range(1, agent.loc['economic_lifetime']+1)])
system_df['kwh_by_timestep'] = system_df['pv'].apply(lambda x: x * pv_kwh_by_year)
n_sys = len(system_df)
for i in system_df.index:
pv_size = system_df['pv'][i].copy()
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
# for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
sell_all = np.sum(pv_size * pv_cf_profile * agent.loc['wholesale_elec_use_per_kwh'])
system_df.loc[i, 'est_bills'] = original_bill - sell_all
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
if pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
if pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for agents with no compensation mechanism: set sell rate to 0 and calculate bill with net load profile
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# Calculate bill savings cash flow
# elec_price_multiplier is the scalar increase in the cost of electricity since 2016, when the tariffs were curated
# elec_price_escalator is this agent's assumption about how the price of electricity will change in the future.
avg_est_bill_savings = (original_bill - np.array(system_df['est_bills'])).reshape([n_sys, 1]) * agent['elec_price_multiplier']
est_bill_savings = | np.zeros([n_sys, agent['economic_lifetime']+1]) | numpy.zeros |
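# Hedged sketch (illustrative addition, not the original dGen implementation): a
# plausible way to expand first-year bill savings into a lifetime cash-flow matrix
# is to escalate them year by year, as the elec_price_escalator comment above
# describes. Variable names and the escalation convention are assumptions.
def _escalate_bill_savings_sketch(avg_first_year_savings, escalator, lifetime):
    # avg_first_year_savings: array of shape [n_sys, 1]; year 0 carries no savings
    years = np.arange(1, lifetime + 1)
    escalation = (1.0 + escalator) ** (years - 1)
    cash_flows = np.zeros([avg_first_year_savings.shape[0], lifetime + 1])
    cash_flows[:, 1:] = avg_first_year_savings * escalation[None, :]
    return cash_flows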
"""Contains functions to evaluate performance of NTP model"""
import copy
import itertools
import numpy as np
from sklearn import metrics
import tensorflow as tf
from ntp.modules.prover import prove
from ntp.modules.gradient import retrieve_top_k
from ntp.modules.kb import kb2ids, partition, kb2nkb
from ntp.modules.nunify import l2_sim_np
from ntp.util.util_eval import decode_rules, harmonic
def auc_helper(relationships, run_rules, run_confidences):
"""
Calculates auc-roc measuring the recall and precision of learned rules relative to a set of existing relationships
"""
targets = []
scores = []
for head, body in relationships.items():
targets.append(1.0)
if [head, body] in run_rules:
index = run_rules.index([head, body])
scores.append(run_confidences[index])
else:
scores.append(0.0)
for j, rule in enumerate(run_rules):
if rule[0] in rule[1]:
continue
# Append incorrect rules with score of 0
if rule[0] in relationships:
if relationships[rule[0]] == rule[1]:
continue
targets.append(0.0)
scores.append(run_confidences[j])
return targets, scores
def prop_rules(relationships, run_rules, run_confidences, threshold=0.0, allow_reverse=False):
"""
From a list of rules, calculates the proportion of relationships injected into the data that are present in the list of learned rules
"""
relationships_found = 0
for head, body in relationships.items():
if [head, body] in run_rules:
# This finds the first such rule, rules should be sorted by confidence to make sure it's the highest confidence of those rules.
index = run_rules.index([head, body])
if run_confidences[index] > threshold:
relationships_found += 1
        elif allow_reverse and len(body) == 1 and [list(body)[0], {head}] in run_rules:
            index = run_rules.index([list(body)[0], {head}])
            if run_confidences[index] > threshold:
                relationships_found += 1
return relationships_found / len(relationships)
def weighted_prop_rules(relationships, run_rules, run_confidences, threshold=0.0, allow_reverse=False):
"""
From a list of rules and confidences, calculates the proportion of relationships injected into the data that are present in the list of learned rules, weighted by rule confidence
Args:
relationships: relationships injected into the data
run_rules: learned rules
run_confidences: confidences corresponding to those rules. Rules should be sorted by confidence, from high to low.
threshold: minimum confidence under which a rule is not considered
allow_reverse: whether or not a rule 1>0 is accepted if the true rule is 0>1
Returns:
Proportion of relationships injected into the data that are present in the list of learned rules, weighted by confidence
"""
relationships_found = 0
for head, body in relationships.items():
if [head, body] in run_rules:
# This finds the first such rule, rules should be sorted by confidence to make sure it's the highest confidence of those rules.
index = run_rules.index([head, body])
if run_confidences[index] > threshold:
relationships_found += run_confidences[index]
        elif allow_reverse and len(body) == 1 and [list(body)[0], {head}] in run_rules:
            index = run_rules.index([list(body)[0], {head}])
            if run_confidences[index] > threshold:
                relationships_found += run_confidences[index]
return relationships_found / len(relationships)
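# Hedged sketch (illustrative addition, not part of the original module): the rule
# containers used above are head -> body-set pairs, e.g. an injected relationship
# {0: {1}} means predicate 0 is implied by predicate 1. The toy values below are
# assumptions chosen only to show how the weighted recall is computed.
def _demo_weighted_prop_rules():
    relationships = {0: {1}, 2: {3}}
    run_rules = [[0, {1}], [4, {5}]]       # one correct rule, one spurious rule
    run_confidences = [0.8, 0.6]           # sorted from high to low confidence
    # Only the first rule matches an injected relationship, so the weighted
    # proportion is 0.8 / 2 = 0.4.
    return weighted_prop_rules(relationships, run_rules, run_confidences)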
def weighted_precision(relationships, run_rules, run_confidences, threshold=0.0, allow_reverse=False):
"""
From a list of rules and confidences, calculates the proportion of those rules that match relationships injected into the data, weighted by confidence.
"""
wrong_relationship_weight = 0
total_relationship_weight = 0
for j, rule in enumerate(run_rules):
# Skip rules with confidence below threshold
if run_confidences[j] < threshold or rule[0] in rule[1]:
continue
total_relationship_weight += run_confidences[j]
# Check if rule is correct
if rule[0] in relationships:
if relationships[rule[0]] == rule[1]:
continue
if len(rule[1]) == 1:
body_pred = list(rule[1])[0]
# If learning reverse rule is acceptable, check for reverse rule for rules with only one body predicate
if allow_reverse and body_pred in relationships and relationships[body_pred] == {rule[0]}:
continue
# Learning x-->x is not wrong, technically
elif len(rule) == 2 and rule[0] == body_pred:
continue
wrong_relationship_weight += run_confidences[j]
if total_relationship_weight != 0:
return (total_relationship_weight - wrong_relationship_weight) / total_relationship_weight
else:
return 0
def confidence_accuracy(relationships, run_rules, run_confidences, threshold=0.0, allow_reverse=False):
"""
From a list of rules and confidences, calculates 'confidence accuracy', giving positive points for being confident and right and negative points for confident and wrong
"""
score = 0
for j, rule in enumerate(run_rules):
# Skip rules with confidence below threshold
if run_confidences[j] < threshold:
continue
if rule[0] in relationships:
if relationships[rule[0]] == rule[1]:
score += run_confidences[j]
continue
if len(rule) == 2:
body_pred = list(rule[1])[0]
if allow_reverse and relationships[body_pred] == rule[0]:
score += run_confidences[j]
continue
# skip identity
if rule[0] == body_pred:
continue
# if rule was not correct, add negative confidence
score -= run_confidences[j]
return score
def eval_batch(goal, target, emb, kb_ids, vocab, goal_struct, batch_mask, k_max, max_depth):
"""Retrieve accuracy of batch of facts relative to target, given kb of training facts"""
kb_goal = copy.deepcopy(kb_ids)
kb_goal['goal'] = [[row.numpy() for row in goal]]
nkb = kb2nkb(kb_goal, emb)
goal = [{'struct': 'goal', 'atom': 0, 'symbol': i} for i in range(len(goal_struct[0]))]
proofs = prove(nkb, goal, goal_struct, batch_mask,
k_max=k_max, max_depth=max_depth, vocab=vocab)
score = np.squeeze(retrieve_top_k(proofs).numpy())
target = target.numpy()
result = score > 0.5
accuracy = np.mean(result == target)
weighted_accuracy = np.mean((target == 1) * (1 * result) + (target == 0) * (-1 * result))
return accuracy, weighted_accuracy
def eval_fact_accuracy(batch_list, emb, kb_ids, vocab, k_max, max_depth):
"""Retrieve average accuracy of list of _train_ fact batches, given kb of training facts. """
accuracy_list= []
weighted_accuracy_list = []
for j, batch in enumerate(batch_list):
goal = tf.constant(batch["goal"])
mask_indices = tf.constant(batch["mask_indices"])
with tf.device("/device:GPU:0"):
target = tf.constant(batch["target"], dtype=tf.float32)
base_mask = tf.ones([batch["n_facts_struct"], batch["batch_size"]], dtype=tf.float32)
updates = -1.0 * tf.ones(len(batch["mask_indices"]), dtype=tf.float32)
batch_mask = tf.transpose(tf.constant(
base_mask + tf.scatter_nd(mask_indices, updates, base_mask.shape)))
batch_accuracy, weighted_accuracy = eval_batch(goal, target, emb, kb_ids, vocab, batch["struct"], batch_mask, k_max, max_depth)
accuracy_list.append(batch_accuracy)
weighted_accuracy_list.append(weighted_accuracy)
return np.mean(accuracy_list), | np.mean(weighted_accuracy_list) | numpy.mean |
import numpy as np
import numpy.linalg as la
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from tqdm import tqdm
import sys
import SBW_util as util
from matplotlib.animation import FuncAnimation
# init cond
# plots 1D
# report (todo's, structure, division of work )
# 2D scheme correspondence
# stability
# convergence plots
# test case w oxygen
eps_u = 0.001 # 0.01
eps_v = 0.001 # 0.001
gamma_u = 0.005# 0.05
zeta = 0.0
alpha_v = 0.1
beta_v = 0.1
eta_w = 10.0
def constr_lineqU(U, W, V, N, M, T):
'''
N: nb of x grid points (int)
T: current timestep (int)
U: discrete solution of u (np.array)
W: discrete solution of w (np.array)
V: discrete solution of v (np.array)
M: nb of time steps (int)
'''
h = 1.0/float(N)
k = 1.0/float(M)
#assert(U.shape == W.shape and W.shape == V.shape, 'Dim error')
#assert(U.shape[1] ==N and U.shape[0] == M, 'Dim error')
DT = 0
X_length = N
A2Ut = | np.zeros((X_length, X_length)) | numpy.zeros |
import copy
import logging
import os
from typing import Dict, List, Tuple
import checksumdir
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from ..adapter import download_object
logger = logging.getLogger("fastface.dataset")
class _IdentitiyTransforms:
"""Dummy tranforms"""
def __call__(self, img: np.ndarray, targets: Dict) -> Tuple:
return img, targets
def default_collate_fn(batch):
batch, targets = zip(*batch)
batch = np.stack(batch, axis=0).astype(np.float32)
batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()
for i, target in enumerate(targets):
for k, v in target.items():
if isinstance(v, np.ndarray):
targets[i][k] = torch.from_numpy(v)
return batch, targets
class BaseDataset(Dataset):
def __init__(self, ids: List[str], targets: List[Dict], transforms=None, **kwargs):
super().__init__()
assert isinstance(ids, list), "given `ids` must be list"
assert isinstance(targets, list), "given `targets must be list"
assert len(ids) == len(targets), "lenght of both lists must be equal"
self.ids = ids
self.targets = targets
self.transforms = _IdentitiyTransforms() if transforms is None else transforms
# set given kwargs to the dataset
for key, value in kwargs.items():
if hasattr(self, key):
# log warning
continue
setattr(self, key, value)
def __getitem__(self, idx: int) -> Tuple:
img = self._load_image(self.ids[idx])
targets = copy.deepcopy(self.targets[idx])
# apply transforms
img, targets = self.transforms(img, targets)
# clip boxes
targets["target_boxes"] = self._clip_boxes(
targets["target_boxes"], img.shape[:2]
)
# discard zero sized boxes
targets["target_boxes"] = self._discard_zero_size_boxes(targets["target_boxes"])
return (img, targets)
def __len__(self) -> int:
return len(self.ids)
@staticmethod
def _clip_boxes(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
# TODO pydoc
height, width = shape
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(min=0, max=width - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(min=0, max=height - 1)
return boxes
@staticmethod
def _discard_zero_size_boxes(boxes: np.ndarray) -> np.ndarray:
# TODO pydoc
scale = (boxes[:, [2, 3]] - boxes[:, [0, 1]]).min(axis=1)
return boxes[scale > 0]
@staticmethod
def _load_image(img_file_path: str):
"""loads rgb image using given file path
Args:
img_path (str): image file path to load
Returns:
np.ndarray: rgb image as np.ndarray
"""
img = imageio.imread(img_file_path)
if not img.flags["C_CONTIGUOUS"]:
# if img is not contiguous than fix it
img = np.ascontiguousarray(img, dtype=img.dtype)
        if len(img.shape) == 3 and img.shape[2] == 4:
# found RGBA, converting to => RGB
img = img[:, :, :3]
elif len(img.shape) == 2:
# found GRAYSCALE, converting to => RGB
img = np.stack([img, img, img], axis=-1)
return | np.array(img, dtype=np.uint8) | numpy.array |
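# Hedged sketch (illustrative addition, not part of the original dataset class):
# the clipping / discarding logic above keeps xyxy boxes inside the image and drops
# boxes whose width or height collapses to zero. The toy boxes are assumptions used
# only to show the effect with plain numpy.
def _demo_box_clip_and_discard():
    boxes = np.array([
        [-10.0, 5.0, 50.0, 40.0],      # partially outside, survives clipping
        [120.0, 120.0, 150.0, 150.0],  # fully outside, collapses and is dropped
    ])
    height, width = 100, 100
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(min=0, max=width - 1)
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(min=0, max=height - 1)
    scale = (boxes[:, [2, 3]] - boxes[:, [0, 1]]).min(axis=1)
    return boxes[scale > 0]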
import ctypes
import logging
import sysconfig
import numpy as np
import numpy.ctypeslib as npct
from .. import tools
from pathlib import Path
logger = logging.getLogger(__name__)
np_double = npct.ndpointer(np.float64, ndim=1, flags='aligned, contiguous, writeable')
np_complex = npct.ndpointer(np.complex128, ndim=1, flags='aligned, c_contiguous, writeable')
np_complex_2d = npct.ndpointer(np.complex128, ndim=2, flags='aligned, c_contiguous, writeable')
np_complex_single = npct.ndpointer(np.complex128, ndim=0)
np_int_pointer = npct.ndpointer(np.int32, ndim=1, flags='aligned, contiguous, writeable')
# Find suffix
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if suffix is None:
suffix = ".so"
# We start by making a path to the current directory.
pymodule_dir = Path(__file__).resolve().parent
__libpath__ = pymodule_dir / ('libxcorr' + suffix)
# Then we open the created shared libecho file
libecho = ctypes.CDLL(__libpath__)
libecho.xcorr_echo_search.argtypes = [
ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
np_complex,
ctypes.c_int,
np_double,
ctypes.c_int,
np_complex_2d,
np_complex_2d,
np_int_pointer,
np_complex,
ctypes.c_int,
np_int_pointer,
ctypes.c_int,
ctypes.c_double,
]
@tools.profiling.timeing(f'{__name__}')
def xcorr_echo_search(
raw_data,
doppler_freq_min,
doppler_freq_max,
doppler_freq_step,
signal_model,
full_gmf_output=False,
):
"""
# Will take a raw_data object and crosscorrelate the data.
"""
matched_filter_output = {}
pows_output = []
index_finish = 0
sample_signal_all = np.sum(raw_data.data, 0)
doppler_freq_size = int(((doppler_freq_max - doppler_freq_min) / doppler_freq_step) + 1)
if len(signal_model.shape) == 1:
signal_model.shape = (1, signal_model.size)
signal_model_size = signal_model.shape[1]
best_peak = np.zeros(sample_signal_all.shape[1], dtype=np.complex128)
best_start = np.zeros(sample_signal_all.shape[1])
best_doppler = np.zeros(sample_signal_all.shape[1])
max_pow_per_delay = np.zeros(
[sample_signal_all.shape[0] + signal_model_size,
sample_signal_all.shape[1]],
dtype=np.complex128
)
max_pow_per_delay_norm = np.zeros(
[sample_signal_all.shape[0] + signal_model_size,
sample_signal_all.shape[1]],
dtype=np.complex128
)
samp = | np.float64(6E-6) | numpy.float64 |
r"""
The Abel-Boutle (2012) PSD
==========================
The Abel-Boutle (2012) PSD is a single moment PSD intended to represent rain
drops. Particle number densities are represented using a gamma distribution function
.. math::
N(D) &= N_0\ D^\gamma \ \exp(-\lambda D).
The parameters :math:`N_0` and :math:`\lambda` can be diagnosed from the rain water
content using
.. math::
    \lambda &= \left[ \frac{\pi \rho_\text{w} x_1 \Gamma(4 + \mu)}{6 \rho_\text{air} q_\text{R}} \right]^{\frac{1}{4 + \mu - x_2}} \\
    N_0 &= x_1 \lambda^{x_2}
.. [AB2012] <NAME>, Boutle IA. 2012. An improved representation of the raindrop size distribution for
single-moment microphysics schemes. <NAME>. Soc. 138: 2151–2162. DOI:10.1002/qj.1949
"""
import numpy as np
import scipy as sp
from scipy.special import gamma
from pyarts.workspace import arts_agenda
from artssat import dimensions as dim
from artssat.scattering.psd.data.psd_data import D_eq
from artssat.scattering.psd.arts.arts_psd import ArtsPSD
from artssat.scattering.psd.data.psd_data import PSDData
class AB12(ArtsPSD):
r"""
The AB12 class provides an implementation of the Abel-Boutle (2012) single-moment
PSD for rain drops.
"""
@classmethod
def from_psd_data(self, psd, mu = 0.0):
r"""
Create a AB12 PSD from given psd data.
Parameters:
            psd(PSDData or other PSD): PSD data from which to create the AB12
representation.
mu(:code:`float` or array): The value of the mu parameter to use.
"""
mass_density = psd.get_mass_density()
return AB12(mu, mass_density)
def __init__(self,
mu = 0.0,
mass_density = None):
r"""
Parameters:
mu(:code:`numpy.float`): The :math:`\mu` parameter of the PSD
mass_density(:code:`numpy.ndarray`): Array containing
the water content for a given set of volume elements in an
atmosphere.
"""
self.mu = mu
if not mass_density is None:
self.mass_density = mass_density
super().__init__(D_eq(1000.0))
def convert_from(self, psd):
r"""
Convert given psd to AB12 PSD with :math:`\mu` parameter of this instance.
Parameters:
psd: Other PSD providing :code:`get_moment` and :code:`get_mass_density`
member functions.
"""
self.mass_density = psd.get_mass_density()
def _get_parameters(self):
"""
Checks if parameters of the PSD are available and tries to broadcast
        them to the shape of the mass density data, and calculates the
        parameters of the PSD function.
Returns:
:code:`tuple(n0, lmbd, mu)` containing the parameters of
the PSD function.
Raises:
An exception if any of the parameters is not set or cannot be
broadcasted into the shape of the number density data.
"""
# Number density
# Mass density
m = self.mass_density
if m is None:
raise Exception("The mass density needs to be set to use"
" this function.")
shape = m.shape
try:
mu = np.broadcast_to( | np.array(self.mu) | numpy.array |
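# Hedged numeric sketch (illustrative addition, not part of the original class):
# the diagnostic relations quoted in the module docstring can be evaluated
# directly. The intercept parameters x1 = 0.22 and x2 = 2.2 are the values
# commonly quoted for Abel & Boutle (2012); they and the densities below should be
# treated as assumptions here.
def _ab12_lambda_n0_sketch(q_r, mu=0.0, rho_air=1.2, rho_water=1000.0,
                           x1=0.22, x2=2.2):
    # q_r: rain water mass mixing ratio [kg/kg]
    lmbd = (np.pi * rho_water * x1 * gamma(4.0 + mu)
            / (6.0 * rho_air * q_r)) ** (1.0 / (4.0 + mu - x2))
    n0 = x1 * lmbd ** x2
    return lmbd, n0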
# -*- coding: utf-8 -*-
"""Tests for `codex-africanus` package."""
import numpy as np
import pytest
def test_fit_spi_components_vs_scipy():
"""
Here we just test the per component spi fitter against
a looped version of scipy's curve_fit
:return:
"""
from africanus.model.spi import fit_spi_components
curve_fit = pytest.importorskip("scipy.optimize").curve_fit
np.random.seed(123)
ncomps = 25
alphas = -0.7 + 0.25 * np.random.randn(ncomps, 1)
i0s = 5.0 + np.random.randn(ncomps, 1)
nfreqs = 100
freqs = np.linspace(0.5, 1.5, nfreqs).reshape(1, nfreqs)
freq0 = 0.7
model = i0s * (freqs / freq0) ** alphas
sigma = np.abs(0.25 + 0.1 * np.random.randn(nfreqs))
data = model + sigma[None, :] * np.random.randn(ncomps, nfreqs)
weights = 1.0/sigma**2
alpha1, alphavar1, I01, I0var1 = fit_spi_components(
data, weights, freqs.squeeze(), freq0, tol=1e-8)
def spi_func(nu, I0, alpha):
return I0 * nu ** alpha
I02 = | np.zeros(ncomps) | numpy.zeros |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import itertools
import random
import numpy as np
import pytest
import sympy
import cirq
class PlusGate(cirq.Gate):
"""A qudit gate that increments a qudit state mod its dimension."""
def __init__(self, dimension, increment=1):
self.dimension = dimension
self.increment = increment % dimension
def _qid_shape_(self):
return (self.dimension,)
def _unitary_(self):
inc = (self.increment - 1) % self.dimension + 1
u = np.empty((self.dimension, self.dimension))
u[inc:] = np.eye(self.dimension)[:-inc]
u[:inc] = np.eye(self.dimension)[-inc:]
return u
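    # Worked example: for dimension=3 and increment=1 the unitary built above is the
    # cyclic permutation matrix [[0, 0, 1], [1, 0, 0], [0, 1, 0]], which sends
    # |0> -> |1>, |1> -> |2> and |2> -> |0>.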
class _TestMixture(cirq.Gate):
def __init__(self, gate_options):
self.gate_options = gate_options
def _qid_shape_(self):
return cirq.qid_shape(self.gate_options[0], ())
def _mixture_(self):
return [(1 / len(self.gate_options), cirq.unitary(g))
for g in self.gate_options]
def test_invalid_dtype():
with pytest.raises(ValueError, match='complex'):
cirq.DensityMatrixSimulator(dtype=np.int32)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_no_measurements(dtype):
q0, q1 = cirq.LineQubit.range(2)
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
circuit = cirq.Circuit(cirq.X(q0), cirq.X(q1))
with pytest.raises(ValueError, match="no measurements"):
simulator.run(circuit)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_no_results(dtype):
q0, q1 = cirq.LineQubit.range(2)
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
circuit = cirq.Circuit(cirq.X(q0), cirq.X(q1))
with pytest.raises(ValueError, match="no measurements"):
simulator.run(circuit)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_empty_circuit(dtype):
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
with pytest.raises(ValueError, match="no measurements"):
simulator.run(cirq.Circuit())
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_bit_flips(dtype):
q0, q1 = cirq.LineQubit.range(2)
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
for b0 in [0, 1]:
for b1 in [0, 1]:
circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1),
cirq.measure(q0), cirq.measure(q1))
result = simulator.run(circuit)
np.testing.assert_equal(result.measurements,
{'0': [[b0]], '1': [[b1]]})
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_qudit_increments(dtype):
q0, q1 = cirq.LineQid.for_qid_shape((3, 4))
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
for b0 in [0, 1, 2]:
for b1 in [0, 1, 2, 3]:
circuit = cirq.Circuit(
[PlusGate(3, 1)(q0)] * b0,
[PlusGate(4, 1)(q1)] * b1,
cirq.measure(q0),
cirq.measure(q1),
)
result = simulator.run(circuit)
np.testing.assert_equal(result.measurements, {
'0 (d=3)': [[b0]],
'1 (d=4)': [[b1]]
})
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_not_channel_op(dtype):
class BadOp(cirq.Operation):
def __init__(self, qubits):
self._qubits = qubits
@property
def qubits(self):
return self._qubits
def with_qubits(self, *new_qubits):
# coverage: ignore
return BadOp(self._qubits)
q0 = cirq.LineQubit(0)
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
circuit = cirq.Circuit([BadOp([q0])])
with pytest.raises(TypeError):
simulator.simulate(circuit)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_mixture(dtype):
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
cirq.bit_flip(0.5)(q0), cirq.measure(q0), cirq.measure(q1))
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
result = simulator.run(circuit, repetitions=100)
np.testing.assert_equal(result.measurements['1'], [[0]] * 100)
# Test that we get at least one of each result. Probability of this test
# failing is 2 ** (-99).
q0_measurements = set(x[0] for x in result.measurements['0'].tolist())
assert q0_measurements == {0, 1}
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_qudit_mixture(dtype):
q0, q1 = cirq.LineQid.for_qid_shape((3, 2))
mixture = _TestMixture([PlusGate(3, 0), PlusGate(3, 1), PlusGate(3, 2)])
circuit = cirq.Circuit(mixture(q0), cirq.measure(q0), cirq.measure(q1))
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
result = simulator.run(circuit, repetitions=100)
np.testing.assert_equal(result.measurements['1 (d=2)'], [[0]] * 100)
# Test that we get at least one of each result. Probability of this test
# failing is about 3 * (2/3) ** 100.
q0_measurements = set(x[0] for x in result.measurements['0 (d=3)'].tolist())
assert q0_measurements == {0, 1, 2}
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_channel(dtype):
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.X(q0),
cirq.amplitude_damp(0.5)(q0), cirq.measure(q0),
cirq.measure(q1))
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
result = simulator.run(circuit, repetitions=100)
np.testing.assert_equal(result.measurements['1'], [[0]] * 100)
# Test that we get at least one of each result. Probability of this test
# failing is 2 ** (-99).
q0_measurements = set(x[0] for x in result.measurements['0'].tolist())
assert q0_measurements == {0, 1}
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_run_qudit_channel(dtype):
class TestChannel(cirq.Gate):
def _qid_shape_(self):
return (3,)
def _channel_(self):
return [
np.array([[1, 0, 0], [0, 0.5**0.5, 0], [0, 0, 0.5**0.5]]),
np.array([[0, 0.5**0.5, 0], [0, 0, 0], [0, 0, 0]]),
np.array([[0, 0, 0], [0, 0, 0.5**0.5], [0, 0, 0]]),
]
q0, q1 = cirq.LineQid.for_qid_shape((3, 4))
circuit = cirq.Circuit(
PlusGate(3, 2)(q0),
TestChannel()(q0),
TestChannel()(q0),
cirq.measure(q0),
cirq.measure(q1),
)
simulator = cirq.DensityMatrixSimulator(dtype=dtype)
result = simulator.run(circuit, repetitions=100)
| np.testing.assert_equal(result.measurements['1 (d=4)'], [[0]] * 100) | numpy.testing.assert_equal |
import numpy as np
import random
import torch
class DataSet(object):
def __len__(self):
raise NotImplementedError
def __getitem__(self, index):
raise NotImplementedError
class MyDataSet(DataSet):
def __init__(self, insts, transform=None):
self.insts = insts
self.transform = transform
def __len__(self):
return len(self.insts)
def __getitem__(self, idx):
sample = self.insts[idx]
if self.transform:
sample = self.transform(sample)
return sample
def __iter__(self):
for inst in self.insts:
yield inst
def index(self, item):
return self.insts.index(item)
def data_split(self, split_rate=0.33, shuffle=False):
assert self.insts and len(self.insts) > 0
if shuffle:
np.random.shuffle(self.insts)
val_size = int(len(self.insts) * split_rate)
train_set = MyDataSet(self.insts[:-val_size])
val_set = MyDataSet(self.insts[-val_size:])
return train_set, val_set
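# Usage sketch for MyDataSet.data_split (hypothetical toy instances; not part of
# the original module):
#   ds = MyDataSet(insts=list(range(100)))
#   train_set, val_set = ds.data_split(split_rate=0.33, shuffle=True)
#   assert len(train_set) == 67 and len(val_set) == 33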
def data_split(data_set, split_rate: list, shuffle=False):
assert len(data_set) != 0, 'Empty dataset !'
assert len(split_rate) != 0, 'Empty split rate list !'
n = len(data_set)
if shuffle:
range_idxs = | np.random.permutation(n) | numpy.random.permutation |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_data.ipynb (unless otherwise specified).
__all__ = ['show', 'DeformationField', 'BaseDataset', 'RandomTileDataset', 'TileDataset']
# Cell
import os, zarr, cv2, imageio, shutil, numpy as np
from joblib import Parallel, delayed
from scipy import ndimage
from scipy.interpolate import Rbf
from scipy.interpolate import interp1d
from matplotlib.patches import Rectangle
from skimage.measure import label
from skimage.color import label2rgb
import torch, torch.nn as nn, torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from fastai.vision.all import *
from fastcore.all import *
from .transforms import random_center, WeightTransform, preprocess_mask, create_pdf
import gc
gc.enable()
# Cell
def show(*obj, file_name=None, overlay=False, pred=False,
show_bbox=True, figsize=(10,10), cmap='binary_r', **kwargs):
"Show image, mask, and weight (optional)"
if len(obj)==3:
img,msk,weight = obj
elif len(obj)==2:
img,msk = obj
weight = None
elif len(obj)==1:
img = obj[0]
msk, weight = None, None
else:
raise ValueError(f'Function not defined for {len(obj)} arguments.')
# Image preprocessing
img = np.array(img)
# Swap axis to channels last
if img.shape[0]<20: img=np.moveaxis(img,0,-1)
# One channel images
if img.ndim == 3 and img.shape[-1] == 1:
img=img[...,0]
# Mask preprocessing
if msk is not None:
msk = np.array(msk)
# Remove background class from masks
if msk.shape[0]==2: msk=msk[1,...]
# Create bbox
pad = (np.array(img.shape[:2])-np.array(msk.shape))//2
bbox = Rectangle((pad[0]-1,pad[1]-1),img.shape[0]-2*pad[0]+1,img.shape[0]-2*pad[0]+1,
edgecolor='r',linewidth=1,facecolor='none')
# Padding mask and weights
msk = np.pad(msk, pad, 'constant', constant_values=(0))
if cmap is None:
cmap = 'binary_r' if msk.max()==1 else cmap
# Weights preprocessing
if weight is not None:
weight = np.array(weight)
weight = np.pad(weight, pad, 'constant', constant_values=(0))
ncol=1 if msk is None else 2
ncol=ncol if weight is None else ncol+1
fig, ax = plt.subplots(1,ncol,figsize=figsize)
img_ax = ax[0] if ncol>1 else ax
# Plot img
img_ax.imshow(img, cmap=cmap)
if file_name is not None:
img_ax.set_title('Image {}'.format(file_name))
else:
img_ax.set_title('Image')
img_ax.set_axis_off()
# Plot img and mask
if msk is not None:
if overlay:
label_image = label(msk)
img_l2o = label2rgb(label_image, image=img, bg_label=0, alpha=.8, image_alpha=1)
ax[1].set_title('Image + Mask (#ROIs: {})'.format(label_image.max()))
ax[1].imshow(img_l2o)
else:
ax[1].imshow(msk, cmap=cmap)
ax[1].set_title('Mask')
if show_bbox: ax[1].add_patch(copy(bbox))
ax[1].set_axis_off()
# Plot weights
if weight is not None:
max_w = weight.max()
vmax_w = max(1, max_w)
ax[2].imshow(weight, vmax=vmax_w, cmap=cmap)
if pred:
ax[2].set_title('Prediction')
else:
ax[2].set_title('Weights (max value: {:.{p}f})'.format(max_w, p=1))
if show_bbox: ax[2].add_patch(copy(bbox))
ax[2].set_axis_off()
#ax.set_axis_off()
plt.tight_layout()
plt.show()
# Cell
@typedispatch
def show_batch(x:TensorImage, y:tuple, samples, max_n=6, figsize=None, **kwargs):
"Show one batch (image, mask, and weights) from a `DataLoader`"
max_n = np.min((max_n, len(x)))
if figsize is None: figsize = (12, max_n * 5)
for i in range(max_n): show(x[i], y[0][i], y[1][i], figsize=figsize, **kwargs)
# Cell
@typedispatch
def show_results(x:TensorImage, y:tuple, samples, outs, max_n=4, figsize=None, **kwargs):
"Show image, mask, and weights from `max_n` items"
max_n = np.min((max_n, len(x)))
if figsize is None: figsize = (12, max_n * 5)
for i in range(max_n): show(x[i], y[0][i], outs[i][0], pred=True, figsize=figsize, **kwargs)
# Cell
class DeformationField:
"Creates a deformation field for data augmentation"
def __init__(self, shape=(540, 540), scale=1):
self.shape, self.scale = shape, scale
#grid_range = [np.arange(d*self.scale, step=scale) - (d*self.scale) / 2 for d in shape]
#grid_range = [np.linspace(-(d*self.scale)/2, (d*self.scale)/2, d) for d in shape]
        # Same behaviour as np.arange
grid_range = [np.linspace(-(d*self.scale)/2, ((d*self.scale)/2)-1, d) for d in shape]
self.deformationField = np.meshgrid(*grid_range)[::-1]
self.orders = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC]
def rotate(self, theta=0, phi=0, psi=0):
"Rotate deformation field"
if len(self.shape) == 2:
self.deformationField = [
self.deformationField[0] * np.cos(theta)
+ self.deformationField[1] * np.sin(theta),
-self.deformationField[0] * np.sin(theta)
+ self.deformationField[1] * np.cos(theta),
]
else:
self.deformationField = [
self.deformationField[0],
self.deformationField[1] * np.cos(theta)
+ self.deformationField[2] * np.sin(theta),
-self.deformationField[1] * np.sin(theta)
+ self.deformationField[2] * np.cos(theta),
]
self.deformationField = [
self.deformationField[0] * np.cos(phi)
+ self.deformationField[2] * np.sin(phi),
self.deformationField[1]
- self.deformationField[0] * np.sin(phi)
+ self.deformationField[2] * np.cos(phi),
]
self.deformationField = [
self.deformationField[0],
self.deformationField[1] * np.cos(psi)
+ self.deformationField[2] * np.sin(psi),
-self.deformationField[1] * np.sin(psi)
+ self.deformationField[2] * np.cos(psi),
]
def mirror(self, dims):
"Mirror deformation fild at dims"
for d in range(len(self.shape)):
if dims[d]:
self.deformationField[d] = -self.deformationField[d]
def addRandomDeformation(self, grid=(150, 150), sigma=(10, 10)):
"Add random deformation to the deformation field"
seedGrid = np.meshgrid(
*[np.arange(-g / 2, s + g / 2, g) for (g, s) in zip(grid, self.shape)]
)
seed = [np.random.normal(0, s, g.shape) for (g, s) in zip(seedGrid, sigma)]
defFcn = [Rbf(*seedGrid, s, function="cubic") for s in seed]
targetGrid = np.meshgrid(*map(np.arange, self.shape))
deformation = [f(*targetGrid) for f in defFcn]
self.deformationField = [
f + df for (f, df) in zip(self.deformationField, deformation)
]
def get(self, offset=(0, 0), pad=(0, 0)):
"Get relevant slice from deformation field"
sliceDef = tuple(slice(int(p / 2), int(-p / 2)) if p > 0 else None for p in pad)
deform = [d[sliceDef] for d in self.deformationField]
return [d + offs for (d, offs) in zip(deform, offset)]
def apply_slow(self, data, offset=(0, 0), pad=(0, 0), order=1):
"Apply deformation field to image using interpolation"
outshape = tuple(int(s - p) for (s, p) in zip(self.shape, pad))
coords = [d.flatten() for d in self.get(offset, pad)]
if len(data.shape) == len(self.shape) + 1:
tile = np.empty((data.shape[-1], *outshape))
for c in range(data.shape[-1]):
tile[c,...] = ndimage.interpolation.map_coordinates(data[..., c], coords, order=order, mode="reflect").reshape(outshape)
else:
tile = ndimage.interpolation.map_coordinates(data, coords, order=order, mode="reflect").reshape(outshape)
return tile.astype(data.dtype)
def apply(self, data, offset=(0, 0), pad=(0, 0), order=1):
"Apply deformation field to image using interpolation"
outshape = tuple(int(s - p) for (s, p) in zip(self.shape, pad))
coords = [np.squeeze(d).astype('float32').reshape(*outshape) for d in self.get(offset, pad)]
# Get slices to avoid loading all data (.zarr files)
sl = []
for i in range(len(coords)):
cmin, cmax = int(coords[i].min()), int(coords[i].max())
dmax = data.shape[i]
if cmin<0:
cmax = max(-cmin, cmax)
cmin = 0
elif cmax>dmax:
cmin = min(cmin, 2*dmax-cmax)
cmax = dmax
coords[i] -= cmin
else: coords[i] -= cmin
sl.append(slice(cmin, cmax))
if len(data.shape) == len(self.shape) + 1:
tile = np.empty((*outshape, data.shape[-1]))
for c in range(data.shape[-1]):
tile[..., c] = cv2.remap(data[sl[0],sl[1], c], coords[1],coords[0], interpolation=order, borderMode=cv2.BORDER_REFLECT)
else:
tile = cv2.remap(data[sl[0], sl[1]], coords[1], coords[0], interpolation=order, borderMode=cv2.BORDER_REFLECT)
return tile
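    # Usage sketch (hypothetical image and parameters; not part of the original module):
    #   field = DeformationField(shape=(540, 540), scale=1)
    #   field.rotate(theta=np.pi / 6)
    #   field.mirror((True, False))
    #   field.addRandomDeformation(grid=(150, 150), sigma=(10, 10))
    #   tile = field.apply(img, offset=(270, 270), pad=(184, 184))  # -> (356, 356[, c])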
# Cell
def _read_img(path, divide=None, **kwargs):
"Read image and normalize to 0-1 range"
if path.suffix == '.zarr':
img = zarr.convenience.open(path.as_posix())
if len(img.shape)==4: # assuming shape (z_dim, n_channel, y_dim, x_dim)
img = np.max(img, axis=0) # max z projection
img = np.moveaxis(img, 0, -1)
else:
img = imageio.imread(path, **kwargs)
if divide is None and img.max()>0:
img = img/np.iinfo(img.dtype).max
if divide is not None:
img = img/divide
#assert img.max()<=1. and img.max()>.04, f'Check image loading, dividing by {divide}, max value is {img.max()}'
assert img.max()<=1., f'Check image loading, dividing by {divide}'
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
return img
# Cell
def _read_msk(path, n_classes=2, instance_labels=False, **kwargs):
"Read image and check classes"
if path.suffix == '.zarr':
msk = zarr.convenience.open(path.as_posix())
else:
msk = imageio.imread(path, **kwargs)
if not instance_labels:
if np.max(msk)>n_classes:
msk = msk//np.iinfo(msk.dtype).max
# Remove channels if no extra information given
if len(msk.shape)==3:
if np.array_equal(msk[...,0], msk[...,1]):
msk = msk[...,0]
# Mask check
assert len(np.unique(msk))<=n_classes, 'Check n_classes and provided mask'
return msk
# Cell
class BaseDataset(Dataset):
def __init__(self, files, label_fn=None, instance_labels = False, n_classes=2, divide=None, ignore={},remove_overlap=True,
tile_shape=(540,540), padding=(184,184),preproc_dir=None, fbr=.1, n_jobs=-1, verbose=1, scale=1, loss_weights=True, **kwargs):
store_attr('files, label_fn, instance_labels, divide, n_classes, ignore, tile_shape, remove_overlap, padding, fbr, scale, loss_weights')
self.c = n_classes
if label_fn is not None:
if not preproc_dir: self.preproc_dir = Path(label_fn(files[0])).parent/'.cache'
else: self.preproc_dir = Path(preproc_dir)
self.labels = zarr.group((self.preproc_dir/'labels').as_posix())
self.pdfs = zarr.group((self.preproc_dir/'pdfs').as_posix())
self._preproc(n_jobs, verbose)
def read_img(self, *args, **kwargs):
return _read_img(*args, **kwargs)
def read_mask(self, *args, **kwargs):
return _read_msk(*args, **kwargs)
def _name_fn(self, g):
"Name of preprocessed and compressed data."
return f'{g}_{self.fbr}'
def _preproc_file(self, file):
"Preprocesses and saves labels (msk), weights, and pdf."
label_path = self.label_fn(file)
if self.instance_labels:
clabels = None
instlabels = self.read_mask(label_path, self.c, instance_labels=True)
else:
clabels = self.read_mask(label_path, self.c)
instlabels = None
ign = self.ignore[file.name] if file.name in self.ignore else None
lbl = preprocess_mask(clabels, instlabels, n_dims=self.c, remove_overlap=self.remove_overlap)
self.labels[file.name] = lbl
self.pdfs[self._name_fn(file.name)] = create_pdf(lbl, ignore=ign, fbr=self.fbr, scale=512)
def _preproc(self, n_jobs=-1, verbose=0):
using_cache = False
preproc_queue=L()
for f in self.files:
try:
#lbl, wgt, pdf = _get_cached_data(self._cache_fn(f.name))
self.labels[f.name]
self.pdfs[self._name_fn(f.name)]
if not using_cache:
if verbose>0: print(f'Using preprocessed masks from {self.preproc_dir}')
using_cache = True
except:
if n_jobs==1:
if verbose>0: print('Preprocessing', f.name)
self._preproc_file(f)
else:
preproc_queue.append(f)
if len(preproc_queue)>0:
if verbose>0: print('Preprocessing', L([f.name for f in preproc_queue]))
_ = Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading')(delayed(self._preproc_file)(f) for f in preproc_queue)
def get_data(self, files=None, max_n=None, mask=False):
if files is not None:
files = L(files)
elif max_n is not None:
max_n = np.min((max_n, len(self.files)))
files = self.files[:max_n]
else:
files = self.files
data_list = L()
for f in files:
if mask: d = self.labels[f.name]
else: d = self.read_img(f, divide=self.divide)
data_list.append(d)
return data_list
def show_data(self, files=None, max_n=6, ncols=1, figsize=None, **kwargs):
if files is not None:
files = L(files)
max_n = len(files)
else:
max_n = np.min((max_n, len(self.files)))
files = self.files[:max_n]
if figsize is None: figsize = (ncols*12, max_n//ncols * 5)
for f in files:
img = self.read_img(f, divide=self.divide)
if self.label_fn is not None:
lbl = self.labels[f.name]
show(img, lbl, file_name=f.name, figsize=figsize, show_bbox=False, **kwargs)
else:
show(img, file_name=f.name, figsize=figsize, show_bbox=False, **kwargs)
def clear_cached_weights(self):
"Clears cache directory with pretrained weights."
try:
shutil.rmtree(self.preproc_dir)
print(f"Deleting all cache at {self.preproc_dir}")
except: print(f"No temporary files to delete at {self.preproc_dir}")
#https://stackoverflow.com/questions/60101240/finding-mean-and-standard-deviation-across-image-channels-pytorch/60803379#60803379
def compute_stats(self, max_samples=50):
"Computes mean and std from files"
print('Computing Stats...')
mean_sum, var_sum = 0., 0.
for i, f in enumerate(self.files, 1):
img = self.read_img(f, divide=self.divide)[:]
mean_sum += img.mean((0,1))
var_sum += img.var((0,1))
if i==max_samples:
print(f'Calculated stats from {i} files')
                break
self.mean = mean_sum/i
self.std = np.sqrt(var_sum/i)
return ([self.mean], [self.std])
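    # Usage sketch (hypothetical paths and label layout; not part of the original module):
    #   files = get_image_files('data/images')
    #   label_fn = lambda p: Path('data/masks')/p.name
    #   ds = BaseDataset(files, label_fn=label_fn, n_classes=2)
    #   ds.show_data(max_n=2)
    #   mean, std = ds.compute_stats()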
# Cell
class RandomTileDataset(BaseDataset):
"""
Pytorch Dataset that creates random tiles with augmentations from the input images.
"""
n_inp = 1
def __init__(self, *args, sample_mult=None, flip=True, rotation_range_deg=(0, 360), deformation_grid=(150, 150), deformation_magnitude=(10, 10),
value_minimum_range=(0, 0), value_maximum_range=(1, 1), value_slope_range=(1, 1), p_zoom=0.75, zoom_sigma=0.1,
albumentations_tfms=None, **kwargs):
super().__init__(*args, **kwargs)
store_attr('sample_mult, flip, rotation_range_deg, deformation_grid, deformation_magnitude, value_minimum_range, value_maximum_range, value_slope_range, zoom_sigma, p_zoom, albumentations_tfms')
        # Sample multiplier: Number of random samplings from augmented image
if self.sample_mult is None:
tile_shape = np.array(self.tile_shape)-np.array(self.padding)
msk_shape = np.array(self.get_data(max_n=1)[0].shape[:-1])
#msk_shape = np.array(lbl.shape[-2:])
self.sample_mult = int(np.product(np.floor(msk_shape/tile_shape)))
self.on_epoch_end()
def __len__(self):
return len(self.files)*self.sample_mult
def __getitem__(self, idx):
idx = idx % len(self.files)
if torch.is_tensor(idx):
idx = idx.tolist()
img_path = self.files[idx]
img = self.read_img(img_path, divide=self.divide)
n_channels = img.shape[-1]
lbl, pdf = self.labels[img_path.name], self.pdfs[self._name_fn(img_path.name)]
center = random_center(pdf[:], lbl.shape)
X = self.gammaFcn(self.deformationField.apply(img, center).flatten()).reshape((*self.tile_shape, n_channels))
Y = self.deformationField.apply(lbl, center, self.padding, 0)
X1 = X.copy()
if self.albumentations_tfms:
augmented = self.albumentations_tfms(image=(X*255).astype('uint8'),mask=Y.astype('uint8'))
X = (augmented['image']/255)
Y = augmented['mask']
X = X.transpose(2, 0, 1).astype('float32')
Y = Y.astype('int64')
if self.loss_weights:
_, W = cv2.connectedComponents((Y > 0).astype('uint8'), connectivity=4)
return TensorImage(X), TensorMask(Y), torch.Tensor(W)
else:
return TensorImage(X), TensorMask(Y)
def on_epoch_end(self, verbose=False):
if verbose: print("Generating deformation field")
if np.random.random()<self.p_zoom: scale=self.scale*np.random.normal(1, self.zoom_sigma)
else: scale=self.scale
        self.deformationField = DeformationField(self.tile_shape, scale)
if self.rotation_range_deg[1] > self.rotation_range_deg[0]:
self.deformationField.rotate(
theta=np.pi * (np.random.random()
* (self.rotation_range_deg[1] - self.rotation_range_deg[0])
+ self.rotation_range_deg[0])
/ 180.0)
if self.flip:
self.deformationField.mirror(np.random.choice((True,False),2))
if self.deformation_grid is not None:
self.deformationField.addRandomDeformation(
self.deformation_grid, self.deformation_magnitude)
if verbose: print("Generating value augmentation function")
minValue = (self.value_minimum_range[0]
+ (self.value_minimum_range[1] - self.value_minimum_range[0])
* np.random.random())
maxValue = (self.value_maximum_range[0]
+ (self.value_maximum_range[1] - self.value_maximum_range[0])
                    * np.random.random())
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import pytest
import unittest
import random
import numpy as np
import time
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import json
import tensorflow as tf
from aimet_tensorflow.common.graph_eval import initialize_uninitialized_vars
from aimet_tensorflow.quantsim import QuantizationSimModel
from aimet_tensorflow.examples.test_models import depthwise_conv2d_model
from aimet_tensorflow.utils.op.conv import WeightTensorUtils
from aimet_common.quantsim import calculate_delta_offset
tf.compat.v1.disable_eager_execution()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARN)
import libpymo
class TestTrainingExtensionsQcQuantizeOpPerChannel(unittest.TestCase):
def test_qc_quantize_op_cpu_conv(self):
"""
test custom op with CPU
"""
np.random.seed(0)
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
bitwidth = 8
use_symm_encoding = True
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
num_output_channels = 3
inp = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, 2, num_output_channels], name='input')
# Assuming 3 output channels
tensor_quantizer_int64 = [None] * num_output_channels
tensor_quantizers = [None] * num_output_channels
# Create a tensor_quantizer per channel
for i in range(num_output_channels):
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizers[i] = tensor_quantizer
val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quantizer_int64[i] = val
tensor_quant_ref = tf.Variable(tensor_quantizer_int64, trainable=False, dtype=tf.int64)
en_min = (np.zeros(num_output_channels)).tolist()
en_max = [1.0, 2.0, 2.5]
encoding_min = tf.Variable(en_min,
trainable=True, dtype=tf.double)
encoding_max = tf.Variable(en_max,
trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
trainable=False, dtype=tf.int32)
axis = tf.Variable(initial_value=3, trainable=False, dtype=tf.int32)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,
axis.initializer])
# Giving axis = 3
pass_through_op_output = zero_out_module.qc_quantize_per_channel(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding,
axis=axis)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.ones((1, 1, 2, num_output_channels))
inp_data[:, :, :, 1] *= 2
inp_data[:, :, :, 2] *= 3
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
for i in range(num_output_channels):
encoding = tensor_quantizers[i].computeEncoding(bitwidth, use_symm_encoding, False, False)
mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
# compare qc_quantize op's output with input
expected_output = np.ones((1, 1, 2, num_output_channels))
expected_output[:, :, :, 1] *= 2
expected_output[:, :, :, 2] *= 2.5
self.assertTrue(np.allclose(out_data, expected_output, rtol=0.01))
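        # Why channel 2 reads back as 2.5: the input value 3.0 lies outside the
        # per-channel encoding range [0, 2.5], so quantize-dequantize saturates it
        # at the encoding max, which matches the expected output above.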
sess.close()
@pytest.mark.cuda
def test_qc_quantize_op_gpu_conv(self):
"""
test custom op with GPU
"""
np.random.seed(0)
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
bitwidth = 8
use_symm_encoding = True
with graph.as_default():
# place holder for the input
num_output_channels = 3
inp = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, 2, num_output_channels], name='input')
# Assuming 3 output channels
tensor_quantizer_int64 = [None] * num_output_channels
tensor_quantizers = [None] * num_output_channels
# Create a tensor_quantizer per channel
for i in range(num_output_channels):
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizers[i] = tensor_quantizer
val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quantizer_int64[i] = val
tensor_quant_ref = tf.Variable(tensor_quantizer_int64, trainable=False, dtype=tf.int64)
en_min = (np.zeros(num_output_channels)).tolist()
en_max = [1.0, 2.0, 2.5]
encoding_min = tf.Variable(en_min,
trainable=True, dtype=tf.double)
encoding_max = tf.Variable(en_max,
trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
trainable=False, dtype=tf.int32)
axis = tf.Variable(initial_value=3, trainable=False, dtype=tf.int32)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,
axis.initializer])
with tf.device("/device:GPU:0"):
# Giving axis = 3
pass_through_op_output = zero_out_module.qc_quantize_per_channel(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding,
axis=3)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.ones((1, 1, 2, num_output_channels))
inp_data[:, :, :, 1] *= 2
inp_data[:, :, :, 2] *= 3
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
# compare qc_quantize op's output with input
expected_output = np.ones((1, 1, 2, num_output_channels))
expected_output[:, :, :, 1] *= 2
expected_output[:, :, :, 2] *= 2.5
self.assertTrue( | np.allclose(out_data, expected_output, rtol=0.01) | numpy.allclose |
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Parts are based on https://github.com/multimodallearning/pytorch-mask-rcnn
published under MIT license.
"""
import warnings
warnings.filterwarnings('ignore', '.*From scipy 0.13.0, the output shape of zoom()*')
import numpy as np
import scipy.misc
import scipy.ndimage
import scipy.interpolate
from scipy.ndimage.measurements import label as lb
import torch
import tqdm
from custom_extensions.nms import nms
from custom_extensions.roi_align import roi_align
############################################################
# Segmentation Processing
############################################################
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(int(ax))
return input
def get_one_hot_encoding(y, n_classes):
"""
    transform a numpy label array to a one-hot array.
:param y: array of shape (b, 1, y, x, (z)).
:param n_classes: int, number of classes to unfold in one-hot encoding.
:return y_ohe: array of shape (b, n_classes, y, x, (z))
"""
dim = len(y.shape) - 2
if dim == 2:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3])).astype('int32')
elif dim == 3:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3], y.shape[4])).astype('int32')
else:
raise Exception("invalid dimensions {} encountered".format(y.shape))
for cl in np.arange(n_classes):
y_ohe[:, cl][y[:, 0] == cl] = 1
return y_ohe
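# Example: with n_classes=3, a label array y of shape (b, 1, y, x) holding values
# in {0, 1, 2} is turned into y_ohe of shape (b, 3, y, x), where y_ohe[:, c] is 1
# exactly at the positions where y[:, 0] == c.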
def dice_per_batch_inst_and_class(pred, y, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch instance and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
:param y: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes]
:param n_classes: int
:return: dice scores of shape (b, c)
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
y = get_one_hot_encoding(y, n_classes)
axes = tuple(range(2, len(pred.shape)))
intersect = np.sum(pred*y, axis=axes)
denominator = np.sum(pred, axis=axes)+np.sum(y, axis=axes)
dice = (2.0*intersect + smooth) / (denominator + smooth)
return dice
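# Worked example for a single instance and class channel: if pred and y overlap on
# 3 pixels, pred marks 4 foreground pixels and y marks 5, the dice score is
# (2 * 3 + smooth) / (4 + 5 + smooth) ~= 6 / 9 ~= 0.67.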
def dice_per_batch_and_class(pred, targ, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
:param targ: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes])
:param n_classes: int
:param smooth: Laplacian smooth, https://en.wikipedia.org/wiki/Additive_smoothing
:return: dice scores of shape (b, c)
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
targ = get_one_hot_encoding(targ, n_classes)
axes = (0, *list(range(2, len(pred.shape)))) #(0,2,3(,4))
intersect = np.sum(pred * targ, axis=axes)
denominator = np.sum(pred, axis=axes) + np.sum(targ, axis=axes)
dice = (2.0 * intersect + smooth) / (denominator + smooth)
assert dice.shape==(n_classes,), "dice shp {}".format(dice.shape)
return dice
def batch_dice(pred, y, false_positive_weight=1.0, smooth=1e-6):
'''
compute soft dice over batch. this is a differentiable score and can be used as a loss function.
only dice scores of foreground classes are returned, since training typically
does not benefit from explicit background optimization. Pixels of the entire batch are considered a pseudo-volume to compute dice scores of.
This way, single patches with missing foreground classes can not produce faulty gradients.
:param pred: (b, c, y, x, (z)), softmax probabilities (network output).
    :param y: (b, c, y, x, (z)), one-hot encoded segmentation mask.
:param false_positive_weight: float [0,1]. For weighting of imbalanced classes,
reduces the penalty for false-positive pixels. Can be beneficial sometimes in data with heavy fg/bg imbalances.
    :return: soft dice score (float). This function discards the background score and returns the mean of foreground scores.
'''
if len(pred.size()) == 4:
axes = (0, 2, 3)
intersect = sum_tensor(pred * y, axes, keepdim=False)
denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) #only fg dice here.
elif len(pred.size()) == 5:
axes = (0, 2, 3, 4)
intersect = sum_tensor(pred * y, axes, keepdim=False)
denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) #only fg dice here.
else:
raise ValueError('wrong input dimension in dice loss')
############################################################
# Bounding Boxes
############################################################
def compute_iou_2D(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2] THIS IS THE GT BOX
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
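# Worked example: box = [0, 0, 2, 2] (area 4) against boxes = [[1, 1, 3, 3]] (area 4):
# the intersection is the unit square [1, 1, 2, 2] (area 1), the union is 4 + 4 - 1 = 7,
# so the returned IoU is 1 / 7 ~= 0.14.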
def compute_iou_3D(box, boxes, box_volume, boxes_volume):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2, z1, z2] (typically gt box)
boxes: [boxes_count, (y1, x1, y2, x2, z1, z2)]
    box_volume: float. the volume of 'box'
    boxes_volume: array of length boxes_count.
    Note: the volumes are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
z1 = np.maximum(box[4], boxes[:, 4])
z2 = np.minimum(box[5], boxes[:, 5])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) * np.maximum(z2 - z1, 0)
union = box_volume + boxes_volume[:] - intersection[:]
iou = intersection / union
return iou
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)]. / 3D: (z1, z2))
For better performance, pass the largest set first and the smaller second.
    :return: (#boxes1, #boxes2), ious of each box of 1 matched with each of 2
"""
# Areas of anchors and GT boxes
if boxes1.shape[1] == 4:
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i] #this is the gt box
overlaps[:, i] = compute_iou_2D(box2, boxes1, area2[i], area1)
return overlaps
else:
# Areas of anchors and GT boxes
volume1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) * (boxes1[:, 5] - boxes1[:, 4])
volume2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) * (boxes2[:, 5] - boxes2[:, 4])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(boxes2.shape[0]):
box2 = boxes2[i] # this is the gt box
overlaps[:, i] = compute_iou_3D(box2, boxes1, volume2[i], volume1)
return overlaps
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)] / 3D: (z1, z2))
"""
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = torch.log(gt_height / height)
dw = torch.log(gt_width / width)
result = torch.stack([dy, dx, dh, dw], dim=1)
if box.shape[1] > 4:
depth = box[:, 5] - box[:, 4]
center_z = box[:, 4] + 0.5 * depth
gt_depth = gt_box[:, 5] - gt_box[:, 4]
gt_center_z = gt_box[:, 4] + 0.5 * gt_depth
dz = (gt_center_z - center_z) / depth
dd = torch.log(gt_depth / depth)
result = torch.stack([dy, dx, dz, dh, dw, dd], dim=1)
return result
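# Worked example (2D): box = [[0, 0, 10, 10]] and gt_box = [[0, 0, 20, 20]] give
# dy = dx = (10 - 5) / 10 = 0.5 and dh = dw = log(20 / 10) ~= 0.693.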
def unmold_mask_2D(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
    to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2 = bbox
out_zoom = [y2 - y1, x2 - x1]
zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]
mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2]) #only y,x
full_mask[y1:y2, x1:x2] = mask
return full_mask
def unmold_mask_2D_torch(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
    to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2 = bbox
out_zoom = [(y2 - y1).float(), (x2 - x1).float()]
zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]
mask = mask.unsqueeze(0).unsqueeze(0)
mask = torch.nn.functional.interpolate(mask, scale_factor=zoom_factor)
mask = mask[0][0]
#mask = scipy.ndimage.zoom(mask.cpu().numpy(), zoom_factor, order=1).astype(np.float32)
#mask = torch.from_numpy(mask).cuda()
# Put the mask in the right location.
full_mask = torch.zeros(image_shape[:2]) # only y,x
full_mask[y1:y2, x1:x2] = mask
return full_mask
def unmold_mask_3D(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
    to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2, z1, z2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2, z1, z2 = bbox
out_zoom = [y2 - y1, x2 - x1, z2 - z1]
zoom_factor = [i/j for i,j in zip(out_zoom, mask.shape)]
mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:3])
full_mask[y1:y2, x1:x2, z1:z2] = mask
return full_mask
def nms_numpy(box_coords, scores, thresh):
""" non-maximum suppression on 2D or 3D boxes in numpy.
:param box_coords: [y1,x1,y2,x2 (,z1,z2)] with y1<=y2, x1<=x2, z1<=z2.
:param scores: ranking scores (higher score == higher rank) of boxes.
:param thresh: IoU threshold for clustering.
:return:
"""
y1 = box_coords[:, 0]
x1 = box_coords[:, 1]
y2 = box_coords[:, 2]
x2 = box_coords[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """the definition of the coordinates is crucially important here:
coordinates of which maxima are taken need to be the lower coordinates"""
areas = (x2 - x1) * (y2 - y1)
is_3d = box_coords.shape[1] == 6
if is_3d: # 3-dim case
z1 = box_coords[:, 4]
z2 = box_coords[:, 5]
        assert np.all(z1<=z2), """the definition of the coordinates is crucially important here:
coordinates of which maxima are taken need to be the lower coordinates"""
areas *= (z2 - z1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order]) # highest scoring element still in >order<, is compared to itself, that is okay.
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
h = np.maximum(0.0, yy2 - yy1)
w = np.maximum(0.0, xx2 - xx1)
inter = h * w
if is_3d:
zz1 = np.maximum(z1[i], z1[order])
zz2 = np.minimum(z2[i], z2[order])
d = np.maximum(0.0, zz2 - zz1)
inter *= d
iou = inter / (areas[i] + areas[order] - inter)
non_matches = np.nonzero(iou <= thresh)[0] # get all elements that were not matched and discard all others.
order = order[non_matches]
keep.append(i)
return keep
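# Usage sketch (hypothetical boxes; not part of the original module):
#   boxes = np.array([[0., 0., 10., 10.], [1., 1., 10., 10.], [20., 20., 30., 30.]])
#   scores = np.array([0.9, 0.8, 0.7])
#   nms_numpy(boxes, scores, thresh=0.5)   # -> [0, 2]; box 1 is suppressed by box 0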
############################################################
# M-RCNN
############################################################
def refine_proposals(rpn_pred_probs, rpn_pred_deltas, proposal_count, batch_anchors, cf):
"""
Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement details to anchors.
:param rpn_pred_probs: (b, n_anchors, 2)
:param rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
:return: batch_normalized_props: Proposals in normalized coordinates (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
:return: batch_out_proposals: Box coords + RPN foreground scores
for monitoring/plotting (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
"""
std_dev = torch.from_numpy(cf.rpn_bbox_std_dev[None]).float().cuda()
norm = torch.from_numpy(cf.scale).float().cuda()
anchors = batch_anchors.clone()
batch_scores = rpn_pred_probs[:, :, 1]
# norm deltas
batch_deltas = rpn_pred_deltas * std_dev
batch_normalized_props = []
batch_out_proposals = []
# loop over batch dimension.
for ix in range(batch_scores.shape[0]):
scores = batch_scores[ix]
deltas = batch_deltas[ix]
non_nans = deltas == deltas
assert torch.all(non_nans), "deltas have nans: {}".format(deltas[~non_nans])
non_nans = anchors == anchors
assert torch.all(non_nans), "anchors have nans: {}".format(anchors[~non_nans])
# improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(cf.pre_nms_limit, anchors.size()[0])
scores, order = scores.sort(descending=True)
order = order[:pre_nms_limit]
scores = scores[:pre_nms_limit]
deltas = deltas[order, :]
# apply deltas to anchors to get refined anchors and filter with non-maximum suppression.
if batch_deltas.shape[-1] == 4:
boxes = apply_box_deltas_2D(anchors[order, :], deltas)
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes before clip/after delta apply have nans: {}".format(boxes[~non_nans])
boxes = clip_boxes_2D(boxes, cf.window)
else:
boxes = apply_box_deltas_3D(anchors[order, :], deltas)
boxes = clip_boxes_3D(boxes, cf.window)
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes before nms/after clip have nans: {}".format(boxes[~non_nans])
# boxes are y1,x1,y2,x2, torchvision-nms requires x1,y1,x2,y2, but consistent swap x<->y is irrelevant.
keep = nms.nms(boxes, scores, cf.rpn_nms_threshold)
keep = keep[:proposal_count]
boxes = boxes[keep, :]
rpn_scores = scores[keep][:, None]
# pad missing boxes with 0.
if boxes.shape[0] < proposal_count:
n_pad_boxes = proposal_count - boxes.shape[0]
zeros = torch.zeros([n_pad_boxes, boxes.shape[1]]).cuda()
boxes = torch.cat([boxes, zeros], dim=0)
zeros = torch.zeros([n_pad_boxes, rpn_scores.shape[1]]).cuda()
rpn_scores = torch.cat([rpn_scores, zeros], dim=0)
# concat box and score info for monitoring/plotting.
batch_out_proposals.append(torch.cat((boxes, rpn_scores), 1).cpu().data.numpy())
# normalize dimensions to range of 0 to 1.
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes after nms have nans: {}".format(boxes[~non_nans])
normalized_boxes = boxes / norm
where = normalized_boxes <=1
assert torch.all(where), "normalized box coords >1 found:\n {}\n".format(normalized_boxes[~where])
# add again batch dimension
batch_normalized_props.append(torch.cat((normalized_boxes, rpn_scores), 1).unsqueeze(0))
batch_normalized_props = torch.cat(batch_normalized_props)
batch_out_proposals = np.array(batch_out_proposals)
return batch_normalized_props, batch_out_proposals
def pyramid_roi_align(feature_maps, rois, pool_size, pyramid_levels, dim):
"""
Implements ROI Pooling on multiple levels of the feature pyramid.
:param feature_maps: list of feature maps, each of shape (b, c, y, x , (z))
:param rois: proposals (normalized coords.) as returned by RPN. contain info about original batch element allocation.
(n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs)
:param pool_size: list of poolsizes in dims: [x, y, (z)]
:param pyramid_levels: list. [0, 1, 2, ...]
:return: pooled: pooled feature map rois (n_proposals, c, poolsize_y, poolsize_x, (poolsize_z))
Output:
    Pooled regions in the shape: [num_boxes, channels, height, width, (depth)].
    The width and height are those specified in pool_size.
"""
boxes = rois[:, :dim*2]
batch_ixs = rois[:, dim*2]
# Assign each ROI to a level in the pyramid based on the ROI area.
if dim == 2:
y1, x1, y2, x2 = boxes.chunk(4, dim=1)
else:
y1, x1, y2, x2, z1, z2 = boxes.chunk(6, dim=1)
h = y2 - y1
w = x2 - x1
# Equation 1 in https://arxiv.org/abs/1612.03144. Account for
# the fact that our coordinates are normalized here.
# divide sqrt(h*w) by 1 instead image_area.
roi_level = (4 + torch.log2(torch.sqrt(h*w))).round().int().clamp(pyramid_levels[0], pyramid_levels[-1])
# if Pyramid contains additional level P6, adapt the roi_level assignment accordingly.
if len(pyramid_levels) == 5:
roi_level[h*w > 0.65] = 5
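    # e.g. a RoI covering a quarter of the (normalized) image in each dimension has
    # sqrt(h*w) = 0.25, giving 4 + log2(0.25) = 2, while a full-image RoI gives 4;
    # both values are then clamped to the available pyramid_levels.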
# Loop through levels and apply ROI pooling to each.
pooled = []
box_to_level = []
fmap_shapes = [f.shape for f in feature_maps]
for level_ix, level in enumerate(pyramid_levels):
ix = roi_level == level
if not ix.any():
continue
ix = torch.nonzero(ix)[:, 0]
level_boxes = boxes[ix, :]
# re-assign rois to feature map of original batch element.
ind = batch_ixs[ix].int()
# Keep track of which box is mapped to which level
box_to_level.append(ix)
        # Stop gradient propagation to ROI proposals
level_boxes = level_boxes.detach()
if len(pool_size) == 2:
# remap to feature map coordinate system
y_exp, x_exp = fmap_shapes[level_ix][2:] # exp = expansion
level_boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
pooled_features = roi_align.roi_align_2d(feature_maps[level_ix],
torch.cat((ind.unsqueeze(1).float(), level_boxes), dim=1),
pool_size)
else:
y_exp, x_exp, z_exp = fmap_shapes[level_ix][2:]
level_boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp, z_exp, z_exp], dtype=torch.float32).cuda())
pooled_features = roi_align.roi_align_3d(feature_maps[level_ix],
torch.cat((ind.unsqueeze(1).float(), level_boxes), dim=1),
pool_size)
pooled.append(pooled_features)
# Pack pooled features into one tensor
pooled = torch.cat(pooled, dim=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = torch.cat(box_to_level, dim=0)
# Rearrange pooled features to match the order of the original boxes
_, box_to_level = torch.sort(box_to_level)
pooled = pooled[box_to_level, :, :]
return pooled
def roi_align_3d_numpy(input: np.ndarray, rois, output_size: tuple,
spatial_scale: float = 1., sampling_ratio: int = -1) -> np.ndarray:
""" This fct mainly serves as a verification method for 3D CUDA implementation of RoIAlign, it's highly
inefficient due to the nested loops.
:param input: (ndarray[N, C, H, W, D]): input feature map
:param rois: list (N,K(n), 6), K(n) = nr of rois in batch-element n, single roi of format (y1,x1,y2,x2,z1,z2)
:param output_size:
:param spatial_scale:
:param sampling_ratio:
:return: (List[N, K(n), C, output_size[0], output_size[1], output_size[2]])
"""
out_height, out_width, out_depth = output_size
coord_grid = tuple([np.linspace(0, input.shape[dim] - 1, num=input.shape[dim]) for dim in range(2, 5)])
pooled_rois = [[]] * len(rois)
assert len(rois) == input.shape[0], "batch dim mismatch, rois: {}, input: {}".format(len(rois), input.shape[0])
print("Numpy 3D RoIAlign progress:", end="\n")
for b in range(input.shape[0]):
for roi in tqdm.tqdm(rois[b]):
y1, x1, y2, x2, z1, z2 = np.array(roi) * spatial_scale
roi_height = max(float(y2 - y1), 1.)
roi_width = max(float(x2 - x1), 1.)
roi_depth = max(float(z2 - z1), 1.)
if sampling_ratio <= 0:
sampling_ratio_h = int(np.ceil(roi_height / out_height))
sampling_ratio_w = int(np.ceil(roi_width / out_width))
sampling_ratio_d = int(np.ceil(roi_depth / out_depth))
else:
sampling_ratio_h = sampling_ratio_w = sampling_ratio_d = sampling_ratio # == n points per bin
bin_height = roi_height / out_height
bin_width = roi_width / out_width
bin_depth = roi_depth / out_depth
n_points = sampling_ratio_h * sampling_ratio_w * sampling_ratio_d
pooled_roi = np.empty((input.shape[1], out_height, out_width, out_depth), dtype="float32")
for chan in range(input.shape[1]):
lin_interpolator = scipy.interpolate.RegularGridInterpolator(coord_grid, input[b, chan],
method="linear")
for bin_iy in range(out_height):
for bin_ix in range(out_width):
for bin_iz in range(out_depth):
bin_val = 0.
for i in range(sampling_ratio_h):
for j in range(sampling_ratio_w):
for k in range(sampling_ratio_d):
loc_ijk = [
y1 + bin_iy * bin_height + (i + 0.5) * (bin_height / sampling_ratio_h),
x1 + bin_ix * bin_width + (j + 0.5) * (bin_width / sampling_ratio_w),
z1 + bin_iz * bin_depth + (k + 0.5) * (bin_depth / sampling_ratio_d)]
# print("loc_ijk", loc_ijk)
if not (np.any([c < -1.0 for c in loc_ijk]) or loc_ijk[0] > input.shape[2] or
loc_ijk[1] > input.shape[3] or loc_ijk[2] > input.shape[4]):
for catch_case in range(3):
# catch on-border cases
if int(loc_ijk[catch_case]) == input.shape[catch_case + 2] - 1:
loc_ijk[catch_case] = input.shape[catch_case + 2] - 1
bin_val += lin_interpolator(loc_ijk)
pooled_roi[chan, bin_iy, bin_ix, bin_iz] = bin_val / n_points
pooled_rois[b].append(pooled_roi)
return np.array(pooled_rois)
def refine_detections(cf, batch_ixs, rois, deltas, scores, regressions):
"""
Refine classified proposals (apply deltas to rpn rois), filter overlaps (nms) and return final detections.
:param rois: (n_proposals, 2 * dim) normalized boxes as proposed by RPN. n_proposals = batch_size * POST_NMS_ROIS
:param deltas: (n_proposals, n_classes, 2 * dim) box refinement deltas as predicted by mrcnn bbox regressor.
:param batch_ixs: (n_proposals) batch element assignment info for re-allocation.
:param scores: (n_proposals, n_classes) probabilities for all classes per roi as predicted by mrcnn classifier.
:param regressions: (n_proposals, n_classes, regression_features (+1 for uncertainty if predicted) regression vector
:return: result: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score, *regression vector features))
"""
# class IDs per ROI. Since scores of all classes are of interest (not just max class), all are kept at this point.
class_ids = []
fg_classes = cf.head_classes - 1
# repeat vectors to fill in predictions for all foreground classes.
for ii in range(1, fg_classes + 1):
class_ids += [ii] * rois.shape[0]
class_ids = torch.from_numpy(np.array(class_ids)).cuda()
batch_ixs = batch_ixs.repeat(fg_classes)
rois = rois.repeat(fg_classes, 1)
deltas = deltas.repeat(fg_classes, 1, 1)
scores = scores.repeat(fg_classes, 1)
regressions = regressions.repeat(fg_classes, 1, 1)
# get class-specific scores and bounding box deltas
idx = torch.arange(class_ids.size()[0]).long().cuda()
# using idx instead of slice [:,] squashes first dimension.
#len(class_ids)>scores.shape[1] --> probs is broadcasted by expansion from fg_classes-->len(class_ids)
batch_ixs = batch_ixs[idx]
deltas_specific = deltas[idx, class_ids]
class_scores = scores[idx, class_ids]
regressions = regressions[idx, class_ids]
# apply bounding box deltas. re-scale to image coordinates.
std_dev = torch.from_numpy(np.reshape(cf.rpn_bbox_std_dev, [1, cf.dim * 2])).float().cuda()
scale = torch.from_numpy(cf.scale).float().cuda()
refined_rois = apply_box_deltas_2D(rois, deltas_specific * std_dev) * scale if cf.dim == 2 else \
apply_box_deltas_3D(rois, deltas_specific * std_dev) * scale
# round and cast to int since we're dealing with pixels now
refined_rois = clip_to_window(cf.window, refined_rois)
refined_rois = torch.round(refined_rois)
# filter out low confidence boxes
keep = idx
keep_bool = (class_scores >= cf.model_min_confidence)
    if 0 not in torch.nonzero(keep_bool).size():
score_keep = torch.nonzero(keep_bool)[:, 0]
pre_nms_class_ids = class_ids[score_keep]
pre_nms_rois = refined_rois[score_keep]
pre_nms_scores = class_scores[score_keep]
pre_nms_batch_ixs = batch_ixs[score_keep]
for j, b in enumerate(unique1d(pre_nms_batch_ixs)):
bixs = torch.nonzero(pre_nms_batch_ixs == b)[:, 0]
bix_class_ids = pre_nms_class_ids[bixs]
bix_rois = pre_nms_rois[bixs]
bix_scores = pre_nms_scores[bixs]
for i, class_id in enumerate(unique1d(bix_class_ids)):
ixs = torch.nonzero(bix_class_ids == class_id)[:, 0]
# nms expects boxes sorted by score.
ix_rois = bix_rois[ixs]
ix_scores = bix_scores[ixs]
ix_scores, order = ix_scores.sort(descending=True)
ix_rois = ix_rois[order, :]
class_keep = nms.nms(ix_rois, ix_scores, cf.detection_nms_threshold)
# map indices back.
class_keep = keep[score_keep[bixs[ixs[order[class_keep]]]]]
# merge indices over classes for current batch element
b_keep = class_keep if i == 0 else unique1d(torch.cat((b_keep, class_keep)))
# only keep top-k boxes of current batch-element
top_ids = class_scores[b_keep].sort(descending=True)[1][:cf.model_max_instances_per_batch_element]
b_keep = b_keep[top_ids]
# merge indices over batch elements.
batch_keep = b_keep if j == 0 else unique1d(torch.cat((batch_keep, b_keep)))
keep = batch_keep
else:
keep = torch.tensor([0]).long().cuda()
# arrange output
output = [refined_rois[keep], batch_ixs[keep].unsqueeze(1)]
output += [class_ids[keep].unsqueeze(1).float(), class_scores[keep].unsqueeze(1)]
output += [regressions[keep]]
result = torch.cat(output, dim=1)
# shape: (n_keeps, catted feats), catted feats: [0:dim*2] are box_coords, [dim*2] are batch_ics,
# [dim*2+1] are class_ids, [dim*2+2] are scores, [dim*2+3:] are regression vector features (incl uncertainty)
return result
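# Illustrative helper (hypothetical, not referenced elsewhere in this module): splits the
# concatenated detection tensor returned by refine_detections back into its named parts,
# following the column layout documented above.
def split_refined_detections(result, dim):
    box_coords = result[:, :dim * 2]
    batch_ixs = result[:, dim * 2]
    class_ids = result[:, dim * 2 + 1]
    scores = result[:, dim * 2 + 2]
    regressions = result[:, dim * 2 + 3:]
    return box_coords, batch_ixs, class_ids, scores, regressions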
def loss_example_mining(cf, batch_proposals, batch_gt_boxes, batch_gt_masks, batch_roi_scores,
batch_gt_class_ids, batch_gt_regressions):
"""
Subsamples proposals for mrcnn losses and generates targets. Sampling is done per batch element, seems to have positive
effects on training, as opposed to sampling over entire batch. Negatives are sampled via stochastic hard-example mining
(SHEM), where a number of negative proposals is drawn from larger pool of highest scoring proposals for stochasticity.
Scoring is obtained here as the max over all foreground probabilities as returned by mrcnn_classifier (worked better than
loss-based class-balancing methods like "online hard-example mining" or "focal loss".)
Classification-regression duality: regressions can be given along with classes (at least fg/bg, only class scores
are used for ranking).
:param batch_proposals: (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs).
boxes as proposed by RPN. n_proposals here is determined by batch_size * POST_NMS_ROIS.
    :param batch_roi_scores: (n_proposals) scores used to rank negative proposals for stochastic hard-example mining (the max over all foreground probabilities, see above).
:param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
:param batch_gt_masks: list over batch elements. Each element is binary mask of shape (n_gt_rois, c, y, x, (z))
:param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
if no classes predicted (only fg/bg from RPN): expected as pseudo classes [0, 1] for bg, fg.
    :param batch_gt_regressions: list over batch elements. Each element is a regression target vector. If None, no regression targets are built (an empty tensor is returned).
:return: sample_indices: (n_sampled_rois) indices of sampled proposals to be used for loss functions.
    :return: target_class_ids: (n_sampled_rois) containing target class labels of sampled proposals.
:return: target_deltas: (n_sampled_rois, 2 * dim) containing target deltas of sampled proposals for box refinement.
:return: target_masks: (n_sampled_rois, y, x, (z)) containing target masks of sampled proposals.
"""
# normalization of target coordinates
#global sample_regressions
if cf.dim == 2:
h, w = cf.patch_size
scale = torch.from_numpy(np.array([h, w, h, w])).float().cuda()
else:
h, w, z = cf.patch_size
scale = torch.from_numpy(np.array([h, w, h, w, z, z])).float().cuda()
positive_count = 0
negative_count = 0
sample_positive_indices = []
sample_negative_indices = []
sample_deltas = []
sample_masks = []
sample_class_ids = []
if batch_gt_regressions is not None:
sample_regressions = []
else:
target_regressions = torch.FloatTensor().cuda()
std_dev = torch.from_numpy(cf.bbox_std_dev).float().cuda()
# loop over batch and get positive and negative sample rois.
for b in range(len(batch_gt_boxes)):
gt_masks = torch.from_numpy(batch_gt_masks[b]).float().cuda()
gt_class_ids = torch.from_numpy(batch_gt_class_ids[b]).int().cuda()
if batch_gt_regressions is not None:
gt_regressions = torch.from_numpy(batch_gt_regressions[b]).float().cuda()
#if np.any(batch_gt_class_ids[b] > 0): # skip roi selection for no gt images.
if np.any([len(coords)>0 for coords in batch_gt_boxes[b]]):
gt_boxes = torch.from_numpy(batch_gt_boxes[b]).float().cuda() / scale
else:
gt_boxes = torch.FloatTensor().cuda()
# get proposals and indices of current batch element.
proposals = batch_proposals[batch_proposals[:, -1] == b][:, :-1]
batch_element_indices = torch.nonzero(batch_proposals[:, -1] == b).squeeze(1)
# Compute overlaps matrix [proposals, gt_boxes]
if not 0 in gt_boxes.size():
if gt_boxes.shape[1] == 4:
                assert cf.dim == 2, "gt_boxes shape {} doesn't match cf.dim {}".format(gt_boxes.shape, cf.dim)
                overlaps = bbox_overlaps_2D(proposals, gt_boxes)
            else:
                assert cf.dim == 3, "gt_boxes shape {} doesn't match cf.dim {}".format(gt_boxes.shape, cf.dim)
overlaps = bbox_overlaps_3D(proposals, gt_boxes)
# Determine positive and negative ROIs
roi_iou_max = torch.max(overlaps, dim=1)[0]
            # 1. Positive ROIs are those with an IoU >= 0.5 (2D) / >= 0.3 (3D) with a GT box.
            positive_roi_bool = roi_iou_max >= (0.5 if cf.dim == 2 else 0.3)
            # 2. Negative ROIs are those with an IoU < 0.1 (2D) / < 0.01 (3D) with every GT box.
            negative_roi_bool = roi_iou_max < (0.1 if cf.dim == 2 else 0.01)
else:
positive_roi_bool = torch.FloatTensor().cuda()
negative_roi_bool = torch.from_numpy(np.array([1]*proposals.shape[0])).cuda()
# Sample Positive ROIs
if not 0 in torch.nonzero(positive_roi_bool).size():
positive_indices = torch.nonzero(positive_roi_bool).squeeze(1)
positive_samples = int(cf.train_rois_per_image * cf.roi_positive_ratio)
rand_idx = torch.randperm(positive_indices.size()[0])
rand_idx = rand_idx[:positive_samples].cuda()
positive_indices = positive_indices[rand_idx]
positive_samples = positive_indices.size()[0]
positive_rois = proposals[positive_indices, :]
# Assign positive ROIs to GT boxes.
positive_overlaps = overlaps[positive_indices, :]
roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
roi_gt_boxes = gt_boxes[roi_gt_box_assignment, :]
roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment]
if batch_gt_regressions is not None:
roi_gt_regressions = gt_regressions[roi_gt_box_assignment]
# Compute bbox refinement targets for positive ROIs
deltas = box_refinement(positive_rois, roi_gt_boxes)
deltas /= std_dev
roi_masks = gt_masks[roi_gt_box_assignment]
assert roi_masks.shape[1] == 1, "gt masks have more than one channel --> is this desired?"
# Compute mask targets
boxes = positive_rois
box_ids = torch.arange(roi_masks.shape[0]).cuda().unsqueeze(1).float()
if len(cf.mask_shape) == 2:
y_exp, x_exp = roi_masks.shape[2:] # exp = expansion
boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
masks = roi_align.roi_align_2d(roi_masks,
torch.cat((box_ids, boxes), dim=1),
cf.mask_shape)
else:
y_exp, x_exp, z_exp = roi_masks.shape[2:] # exp = expansion
boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp, z_exp, z_exp], dtype=torch.float32).cuda())
masks = roi_align.roi_align_3d(roi_masks,
torch.cat((box_ids, boxes), dim=1),
cf.mask_shape)
masks = masks.squeeze(1)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = torch.round(masks)
sample_positive_indices.append(batch_element_indices[positive_indices])
sample_deltas.append(deltas)
sample_masks.append(masks)
sample_class_ids.append(roi_gt_class_ids)
if batch_gt_regressions is not None:
sample_regressions.append(roi_gt_regressions)
positive_count += positive_samples
else:
positive_samples = 0
# Sample negative ROIs. Add enough to maintain positive:negative ratio, but at least 1. Sample via SHEM.
if not 0 in torch.nonzero(negative_roi_bool).size():
negative_indices = torch.nonzero(negative_roi_bool).squeeze(1)
r = 1.0 / cf.roi_positive_ratio
b_neg_count = np.max((int(r * positive_samples - positive_samples), 1))
roi_scores_neg = batch_roi_scores[batch_element_indices[negative_indices]]
raw_sampled_indices = shem(roi_scores_neg, b_neg_count, cf.shem_poolsize)
sample_negative_indices.append(batch_element_indices[negative_indices[raw_sampled_indices]])
negative_count += raw_sampled_indices.size()[0]
if len(sample_positive_indices) > 0:
target_deltas = torch.cat(sample_deltas)
target_masks = torch.cat(sample_masks)
target_class_ids = torch.cat(sample_class_ids)
if batch_gt_regressions is not None:
target_regressions = torch.cat(sample_regressions)
# Pad target information with zeros for negative ROIs.
if positive_count > 0 and negative_count > 0:
sample_indices = torch.cat((torch.cat(sample_positive_indices), torch.cat(sample_negative_indices)), dim=0)
zeros = torch.zeros(negative_count, cf.dim * 2).cuda()
target_deltas = torch.cat([target_deltas, zeros], dim=0)
zeros = torch.zeros(negative_count, *cf.mask_shape).cuda()
target_masks = torch.cat([target_masks, zeros], dim=0)
zeros = torch.zeros(negative_count).int().cuda()
target_class_ids = torch.cat([target_class_ids, zeros], dim=0)
if batch_gt_regressions is not None:
                # regression targets use 0 as the background/negative value, padded below
if 'regression_bin' in cf.prediction_tasks:
zeros = torch.zeros(negative_count, dtype=torch.float).cuda()
else:
zeros = torch.zeros(negative_count, cf.regression_n_features, dtype=torch.float).cuda()
target_regressions = torch.cat([target_regressions, zeros], dim=0)
elif positive_count > 0:
sample_indices = torch.cat(sample_positive_indices)
elif negative_count > 0:
sample_indices = torch.cat(sample_negative_indices)
target_deltas = torch.zeros(negative_count, cf.dim * 2).cuda()
target_masks = torch.zeros(negative_count, *cf.mask_shape).cuda()
target_class_ids = torch.zeros(negative_count).int().cuda()
if batch_gt_regressions is not None:
if 'regression_bin' in cf.prediction_tasks:
target_regressions = torch.zeros(negative_count, dtype=torch.float).cuda()
else:
target_regressions = torch.zeros(negative_count, cf.regression_n_features, dtype=torch.float).cuda()
else:
sample_indices = torch.LongTensor().cuda()
target_class_ids = torch.IntTensor().cuda()
target_deltas = torch.FloatTensor().cuda()
target_masks = torch.FloatTensor().cuda()
target_regressions = torch.FloatTensor().cuda()
return sample_indices, target_deltas, target_masks, target_class_ids, target_regressions
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
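    Example (illustrative values): scales=[32], ratios=[0.5, 1, 2], shape=[16, 16],
    feature_stride=8, anchor_stride=1 places one anchor per (scale, ratio) pair at
    every feature-map pixel, i.e. 3 anchor shapes centered on a 16 x 16 grid of
    positions spaced 8 image pixels apart.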
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = | np.meshgrid(shifts_x, shifts_y) | numpy.meshgrid |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
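    """
    Request a NetCDF download of the given uframe dataset (reference designator /
    delivery method / stream path) for the date range [start_date, end_date] via the
    OOI M2M sensor interface, then poll the request's status endpoint until OOINet
    reports that the files have been staged. Returns the M2M response (JSON) on
    success, or None if the initial request fails.
    """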
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60  # minutes spent polling so far
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    Collected file references are returned as a list for further processing (e.g. by M2M_Data).
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF data files (catalog hrefs) matching the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
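    """
    Read each NetCDF file referenced in nclist from the OOI THREDDS OPeNDAP server and
    append the requested variables' records to the corresponding var objects; each
    catalog href is trimmed to its path relative to the OOI root before being appended
    to the OPeNDAP base URL. The time record (seconds since 1900-01-01) is also
    converted to a pandas DatetimeIndex.
    :param nclist: list of catalog file references as returned by M2M_Files
    :param variables: structtype of var objects whose .name fields select the NetCDF variables to read
    :return: the populated variables and the converted time array
    """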
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
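# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original workflow):
# chains the helpers defined above to request a single telemetered stream and
# read the staged NetCDF files into var/structtype containers. The dataset
# path is one of the streams listed further below; the dates, the file tag and
# the function name itself are hypothetical example values.
# ---------------------------------------------------------------------------
def example_m2m_request(start_date='2019-01-01T00:00:00.000Z',
                        end_date='2019-01-31T23:59:59.999Z'):
    # pre-allocate a single 'time' variable for M2M_Data to fill
    var_list = structtype()
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    # request the data and wait for OOINet to stage it
    dataset = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
    data = M2M_Call(dataset, start_date, end_date)
    # collect the NetCDF file references from the THREDDS catalog and read them
    files = M2M_Files(data, r'.*METBK.*\.nc$')
    return M2M_Data(files, var_list)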
def M2M_URLs(platform_name,node,instrument_class,method):
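    """
    Map a (platform, node, instrument class, delivery method) combination to its uframe
    dataset path and pre-allocate the matching list of output variables (names, empty
    data arrays, units) used by M2M_Data.
    """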
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
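# Single-point velocity meters: eastward/northward/upward velocity plus heading, pitch, roll, temperature, and pressure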
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
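# Seawater pCO2 sensors (NSIF and seafloor MFN nodes): thermistor temperature and pCO2 in uatm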
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
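# Seawater pH sensors: thermistor temperature and pH (unitless)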
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
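# Downwelling spectral irradiance vector in uW cm-2 nm-1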
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
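# Seafloor (MFN) tide/pressure recorders: absolute seafloor pressure in dbar and seawater temperature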
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
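# Moored CTDs: temperature, practical salinity, density, pressure, and conductivity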
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
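# Seafloor (MFN) turbulent point velocity: ENU turbulent velocities plus seawater pressure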
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
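# Profiler-mounted VEL3D (and profiler CTD) on CE09OSPM: velocities, heading/pitch/roll, and CTD pressure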
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
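# Air-sea pCO2 (buoy): seawater and atmospheric pCO2 plus computed CO2 flux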
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
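# Profiler PAR sensor: photosynthetically active radiation and CTD pressure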
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
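# Optical absorption/attenuation meters: only the time coordinate is requested for these streams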
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
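# Nitrate sensors (SUNA streams): nitrate concentration and salinity-corrected nitrate in umol/L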
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
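# Buoy motion packs, recovered-host accelerometer streams: only the time coordinate is requested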
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
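# Bulk meteorology packages (recovered host): sea-surface T/C/S, winds, air pressure/temperature/humidity, radiation, precipitation, flux products, surface currents, and specific humidity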
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
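# Every branch in this catalog follows the same pattern: uframe_dataset_name
# identifies the stream to request, var_list[i].name and .units describe each
# parameter, and .data starts as an empty np.array([]) placeholder that is
# presumably filled later by the script's data-request routine. A minimal,
# illustrative sketch of downstream use (fill_var_list is a hypothetical
# helper; substitute this script's own request function):
#
#   fill_var_list(uframe_dataset_name, var_list, start_time, end_time)
#   import datetime
#   t0 = datetime.datetime(1900, 1, 1)   # time axis is 'seconds since 1900-01-01'
#   times = [t0 + datetime.timedelta(seconds=float(s)) for s in var_list[0].data]
#   sst = var_list[1].data               # e.g. sea_surface_temperature in degC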
#FLORT - 3-wavelength fluorometer (chlorophyll-a, CDOM, optical backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP - direct covariance air-sea flux package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA - dissolved oxygen (optode)
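# The NSIF DOSTA branches expose both dissolved_oxygen (umol/kg) and
# estimated_oxygen_concentration (umol/L); the MFN branches expose
# dissolved_oxygen plus dosta_ln_optode_oxygen (umol/L). Converting umol/L to
# umol/kg needs in-situ density, e.g. from the co-located CTDBP 'density'
# variable (illustrative sketch, assuming density in kg/m3):
#
#   do_umol_kg = var_list[2].data / (density_kg_m3 / 1000.0)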
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP - acoustic Doppler current profiler (earth-coordinate velocity profiles)
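# Note (assumption, verify against the returned shapes): for the
# adcp_velocity_earth streams, bin_depths and the eastward/northward/upward
# velocities are resolved per depth bin, i.e. 2-D arrays of shape (time, bin),
# while heading, pitch and roll are 1-D time series. Example of pulling one bin:
#
#   u_bin0 = var_list[5].data[:, 0]   # eastward velocity in the first bin, m/s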
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS - surface wave spectra (bulk wave statistics)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT - single-point velocity meter
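# The VELPT attitude, temperature and pressure variables are reported in scaled
# integer units (see the unit strings below); converting to physical units is a
# simple multiplication, e.g.:
#
#   heading_deg = var_list[4].data * 0.1     # deci-degrees -> degrees
#   temp_degC   = var_list[7].data * 0.01    # 0.01degC -> degC
#   press_dbar  = var_list[8].data * 0.001   # 0.001dbar -> dbar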
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W - seawater pCO2
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance
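# Note (assumption): spkir_abj_cspp_downwelling_vector carries one spectral
# vector per timestamp (the multispectral radiometer reports several wavelength
# channels), so .data is expected to be 2-D (time, channel) rather than a 1-D
# series; confirm against the returned array shape.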
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - CTD (conductivity, temperature, depth)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single-point turbulent velocity
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption and attenuation
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate concentration (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
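#CTDBP - recovered instrument (RecoveredInst) streams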
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
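#ADCP - recovered instrument streams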
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
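#ZPLSC - echogram data (recovered instrument)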
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
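#VELPT - point velocity meter (recovered instrument)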
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
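#VEL3D velocity meters: the CE09OSPM wire-following profiler (vel3d_k) and the MFN seafloor turbulence packages (vel3d_c/d)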
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
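#PRESF seafloor pressure (tide) sensors on the MFN nodes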
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
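#PHSEN seawater pH sensors (NSIF and MFN recovered-instrument streams)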
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
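#PCO2W seawater pCO2 sensors (NSIF and MFN recovered-instrument streams)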
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
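#PARAD photosynthetically available radiation from the CE09OSPM wire-following profiler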
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
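#NUTNR nitrate sensors (SUNA), recovered-instrument streams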
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
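#FDCHP flux package (only the time base is requested here)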
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
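#FLORT fluorometers: chlorophyll-a, CDOM and optical backscatter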
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
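#DOSTA / DOFST dissolved-oxygen sensors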
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
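#Bulk wave statistics (instrument_class 'WAVSS_Stats') served from the MFN ADCPT log9 recovered stream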
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
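#Cabled (streamed) instruments on the benthic experiment packages at CE02SHBP and CE04OSBP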
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
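#Streamed dissolved oxygen at the benthic packages is taken from the co-located CTDBP-NO stream (ctdbp_no_sample)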
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface Piercing Profiler) data below
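#Each CSPP branch follows the same pattern as the moored branches above:
#set uframe_dataset_name (site/node/sensor + delivery method + stream) and
#pre-declare var_list entries with parameter names, empty np.array([])
#placeholders and display units. A minimal sketch of the assumed downstream
#fill step (netcdf_dataset is illustrative, not part of this module):
#    for v in var_list:
#        if v.name:
#            v.data = np.array(netcdf_dataset.variables[v.name][:])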
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
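# CE02SHSP and CE07SHSP surface-piercing profiler RecoveredCSPP streams follow:
# FLORT, DOSTA, CTD, PARAD, NUTNR, SPKIR, VELPT, and OPTAA, each defined for both platforms.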
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
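# CE05MOAS glider (CEGL###) CTD streams: each glider has a Telemetered and a
# RecoveredHost branch with identical variables (temperature, salinity, density,
# pressure, conductivity, latitude, longitude).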
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
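# CE05MOAS glider DOSTA (dissolved oxygen) streams: oxygen concentration in umol/L
# and umol/kg plus interpolated CTD pressure and position, per glider and method.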
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
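# CE05MOAS glider FLORT (fluorometer / optical backscatter) streams: scattering
# coefficient, chlorophyll, CDOM, backscatter, interpolated CTD pressure, and position.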
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
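    # CE05MOAS coastal glider PARAD branches: photosynthetically available radiation
    # (parad_m_par, umol photons m-2 s-1) with interpolated CTD pressure and position.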
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
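    # CE05MOAS coastal glider ADCP branches (recovered_host only): binned seawater
    # velocities (eastward/northward/upward) plus heading, pitch, roll, pressure, and position.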
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
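    # Surface-mooring METBK1-hr branches: hourly bulk-flux products (rain rate,
    # heat/latent/sensible/momentum/freshwater fluxes, specific humidity, air and skin
    # temperature, 10 m wind, net shortwave irradiance).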
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
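# WAVSS_MeanDir: mean directional wave spectra (wavss_a_dcl_mean_directional streams)
# for the surface-mooring buoys, telemetered and recovered_host.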
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
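# WAVSS_NonDir: non-directional wave spectra (wavss_a_dcl_non_directional streams).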
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
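# WAVSS_Motion: buoy-motion time series (wavss_a_dcl_motion streams) with
# heave/north/east offset arrays and magnetically corrected x/y buoy displacements.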
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
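# WAVSS_Fourier: directional Fourier coefficient products (wavss_a_dcl_fourier streams).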
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
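# Cabled profiler CTD branches: CE04OSPS shallow profiler (SF01B, streamed) and
# CE04OSPD deep profiler (DP01B, recovered_inst / recovered_wfp).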
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
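# Profiler dissolved-oxygen branches: the streamed CE04OSPS oxygen is drawn from the
# SBE43 sensor on the CTD (ctdpf_sbe43_sample); the CE04OSPD records come from the optode.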
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
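# Profiler fluorometer (FLORT / FLNTU+FLCDR) branches: chlorophyll-a, CDOM, and
# optical backscatter, reported with co-located CTD pressure.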
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
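#Additional CE04OSPS/CE04OSPD profiler sensors: PHSEN, PARAD, SPKIR, NUTNR, PCO2W, VELPT (streamed) and VEL3D (recovered)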
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
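#CE04OSPS 200m platform (PC01B) instruments: CTD, DOSTA, PHSEN, PCO2W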
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
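#METBK surface meteorology on the CP01CNSM, CP03ISSM, and CP04OSSM buoys; each block defines time plus 18 bulk-met and minute flux variables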
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
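#WAVSS streams for CP01CNSM: bulk wave statistics, mean directional and non-directional spectra, buoy motion, and Fourier coefficients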
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
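#PCO2A surface seawater and atmospheric pCO2 plus computed CO2 flux (telemetered)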
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A
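#PCO2A recovered-host versions of the same streams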
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
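#FDCHP flux package streams; only the time coordinate is requested for these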
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
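#METBK hourly derived products (metbk_hourly): rain rate, bulk fluxes, 10m wind, and related variables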
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.to_list
def test_basic():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
ind = np.array([2, 2, 0, 3, 4], dtype=np.int32)
index = ak._v2.index.Index32(ind)
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.uint32)
index = ak._v2.index.IndexU32(ind)
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.int64)
index = ak._v2.index.Index64(ind)
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.int32)
index = ak._v2.index.Index32(ind)
array = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.int64)
index = ak._v2.index.Index64(ind)
array = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
def test_type():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
index = ak._v2.index.Index32(np.array([2, 2, 0, 3, 4], dtype=np.int32))
array = ak._v2.contents.IndexedArray(index, content)
assert ak._v2.operations.type(array) == ak._v2.types.NumpyType("float64")
array = ak._v2.contents.IndexedOptionArray(index, content)
assert ak._v2.operations.type(array) == ak._v2.types.OptionType(
ak._v2.types.NumpyType("float64")
)
def test_null():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
index = ak._v2.index.Index64(np.array([2, 2, 0, -1, 4], dtype=np.int64))
array = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, None, 4.4]
def test_carry():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
index = ak._v2.index.Index64(np.array([2, 2, 0, 3, 4], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedArray(index, content)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, indexedarray)
assert to_list(listoffsetarray) == [[2.2, 2.2, 0.0], [], [3.3, 4.4]]
assert to_list(listoffsetarray[::-1]) == [[3.3, 4.4], [], [2.2, 2.2, 0.0]]
assert listoffsetarray.typetracer[::-1].form == listoffsetarray[::-1].form
assert to_list(listoffsetarray[[2, 0]]) == [[3.3, 4.4], [2.2, 2.2, 0.0]]
assert listoffsetarray.typetracer[[2, 0]].form == listoffsetarray[[2, 0]].form
assert to_list(listoffsetarray[[2, 0], 1]) == [4.4, 2.2] # invokes carry
assert listoffsetarray.typetracer[[2, 0], 1].form == listoffsetarray[[2, 0], 1].form
assert to_list(listoffsetarray[2:, 1]) == [4.4] # invokes carry
assert listoffsetarray.typetracer[2:, 1].form == listoffsetarray[2:, 1].form
index = ak._v2.index.Index64(np.array([2, 2, 0, 3, -1], dtype=np.int64))
import os
import pdb
import numpy as np
# pass in copy of objs?
def corrupt_graph(objs, triples, num_attrs, attrs, vocab, random_seed=None):
# either s,p,o, s_attrib, o_attrib
max_corruptable_elements = 5
# max num of objs, preds, attrs in vocab
max_objs = len(vocab['object_idx_to_name'])
max_preds = len(vocab['pred_idx_to_name'])
max_attrs = len(vocab['attribute_idx_to_name'])
# objs is all objects in batch: s/o index into this list
num_triples = len(triples)
s, p, o = np.split(triples, 3, axis=1)
# object ids that index into model vocab
subj_objs = objs[s]
obj_objs = objs[o]
for n in range(0, num_triples):
# debug
subj = np.array(vocab['object_idx_to_name'])[subj_objs[n]]
pred = np.array(vocab['pred_idx_to_name'])[p[n]]
obj = np.array(vocab['object_idx_to_name'])[obj_objs[n]]
print(tuple([subj, pred, obj]))
# pdb.set_trace()  # debug breakpoint disabled so the loop can run unattended
# let's corrupt some part of each triple to avoid exact matches -
# randomly selected
element = np.random.randint(0, max_corruptable_elements)  # randint's upper bound is exclusive, so this covers all 5 slots
# element = 0  # debug override: forces subject corruption only
if element == 0: # s
# add new obj to objs
new_obj = select_object(max_objs)
objs = np.append(objs, new_obj)  # append the replacement object id instead of adding it to every entry
s[n] = len(objs)-1
elif element == 1: # p
p[n] = select_predicate(max_preds)
elif element == 2: # o
new_obj = select_object(max_objs)
objs = np.append(objs, new_obj)  # append the replacement object id
o[n] = len(objs)-1  # corrupt the object slot (was s[n], a copy-paste slip)
elif element == 3: # s_attrib
a = select_attribute(max_attrs)
elif element == 4: # o_attrib
a = select_attribute(max_attrs)
# pdb.set_trace()  # debug breakpoint disabled
return 0
def select_object(num_objs):
return np.random.randint(0, num_objs-1)
def select_predicate(num_preds):
return np.random.randint(0, num_preds-1)
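# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes
# select_attribute mirrors the two helpers above and that vocab is a dict
# holding the three *_idx_to_name lists the function reads; the helper below
# and the toy vocab, objects and triples are made-up placeholders.
def select_attribute(num_attrs):
    return np.random.randint(0, num_attrs-1)

if __name__ == "__main__":
    toy_vocab = {
        'object_idx_to_name': ['__image__', 'sky', 'tree', 'person', 'dog'],
        'pred_idx_to_name': ['__in_image__', 'above', 'next to'],
        'attribute_idx_to_name': ['none', 'small', 'green'],
    }
    toy_objs = np.array([1, 2, 3])          # object ids indexing object_idx_to_name
    toy_triples = np.array([[0, 1, 1],      # [subject slot, predicate id, object slot]
                            [2, 2, 0]])     # slots index into toy_objs
    corrupt_graph(toy_objs, toy_triples, num_attrs=3, attrs=None,
                  vocab=toy_vocab, random_seed=0)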
"""
Descriptions
"main.py"
References
# Freeze Weights
https://stackoverflow.com/questions/35298326/freeze-some-variables-scopes-in-tensorflow-stop-gradient-vs-passing-variables
# Tensorflow multiple sessions with multiple GPUs
https://stackoverflow.com/questions/34775522/tensorflow-multiple-sessions-with-multiple-gpus
# Saver
goodtogreate.tistory.com/entry/Saving-and-Restoring
"""
# Public python modules
import tensorflow as tf
import pandas as pd
import numpy as np
import pickle
# Custom Python modules
import vgg16_adap
from gen_data import generate
from load_data import load
from cfmtx import cfmtx
# TODO: set the following parameters
gpu_device = 'device:GPU:0' # Which GPU are you going to use?
is_transfer_learn = True # Transfer the pre-trained weights?
gen_data = True # Generate dataset (.p format)?
freeze_layer = False # Are you going to freeze certain layers? (Check optimizer code below)
bn = False # Turn on batch normalization?
saver = False # Are you going to save whole weights after training?
fold = 1 # Fold k; k-fold cross-validation
rec_name = 'result/tr_nf_mspdb_2048_2048_592' + str(fold) # Save results as .csv;
rec_name_cfm = 'result/cfm_tr_nf_mspdb_2048_2048_592' + str(fold) + 'ep' # Record confusion matrix as .csv
pretrain_weights = 'vgg16_weights.npz' # Where is the pretraining weights?
saver_name = 'saver_tr_nf_mspdb_2048_2048_592_k5.npz' # if saver = True, save the weights as .npz
metadata_path = 'dataset/metadata_5fcv_box.csv' # where is meta-data?
traindata_path = 'dataset/train_5fcv_k' + str(fold) + '.p' # Where is training data?
validdata_path = 'dataset/valid_5fcv_k' + str(fold) + '.p' # Where is validation data?
label_column_name = 'category' # In the metadata, which index indicates category?
n_category = 39 # The number of categories;
batch_size_tr = 39 # Batch size of training data
batch_size_val = 39 # Batch size of validation data
n_epoch = 50 # Epoch
learning_rate = 0.001 # Learning rate
# With "gpu_device"
with tf.device(gpu_device):
# If "gen_data" = True, generate dataset in .p format
if gen_data:
generate(metadata_path = metadata_path, data_path = traindata_path,
batch_size = batch_size_tr, label_column_name=label_column_name,
is_training = True, fold=fold)
generate(metadata_path = metadata_path, data_path = validdata_path,
batch_size = batch_size_tr, label_column_name=label_column_name,
is_training = False, fold=fold)
else:
pass
# Calculate mean of each channel
#- Load the training data (.p); Note that "dataframe" is an instance
patch_mean = np.array([0, 0, 0], np.float32) # Init.
dataframe = load(traindata_path, batch_size_tr) # Instance
for i, row in dataframe.dataframe.iterrows():
# Calculate mean of each channel
patch = row['patch']
patch_mean[0] += np.mean(patch[:, :, 0]) # Ch 0
patch_mean[1] += np.mean(patch[:, :, 1]) # Ch 1
patch_mean[2] += np.mean(patch[:, :, 2]) # Ch 2
#print(patch_mean)
patch_mean = patch_mean / len(dataframe.dataframe['patch'])
print("patch_mean:", patch_mean)
del dataframe # Release the instance; the training data is re-loaded below as "dataframe_tr"
# Session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Placeholders
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3]) # [None, width_VGG16 * height_VGG16 * depth_VGG16]
y = tf.placeholder(tf.float32, [None, n_category])
# VGG16 instance; Transfer the pretrained weights
if is_transfer_learn:
vgg = vgg16_adap.vgg16(imgs=imgs, img_mean=patch_mean, weights=pretrain_weights, sess=sess, bn=bn, bn_is_training=False)
else:
vgg = vgg16_adap.vgg16(imgs=imgs, img_mean=patch_mean, sess=sess, bn=bn, bn_is_training=True)
# Logits, y_out, loss
logits = vgg.fc4l
y_out = tf.nn.softmax(logits)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
# Accuracy measurement
correct_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Optimization
if freeze_layer:
#train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss, var_list=[vgg.fc3w, vgg.fc3b, vgg.fc4w, vgg.fc4b])
# Deeper
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss,var_list=[vgg.fc1w, vgg.fc1b,vgg.fc2w, vgg.fc2b,
vgg.fc3w, vgg.fc3b, vgg.fc4w, vgg.fc4b])
# Update all layers
else:
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
# The newly added FC layer needs its variables initialized explicitly
# When not transferring the pre-trained weights, we need to initialize the whole graph instead
if is_transfer_learn:
init_new_vars_op = tf.variables_initializer([vgg.fc4w, vgg.fc4b]) # New; New FC layer @"vgg16_adap" needs graph initialization
sess.run(init_new_vars_op) # New; Run graph initialization
else:
sess.run(tf.global_variables_initializer())
# Training and validation
# Variables used for recording training and validation
rec_epoch = []
rec_train_err = []
rec_train_acc = []
rec_valid_err = []
rec_valid_acc = []
rec_cfm = [] # For recording confusion matrix
rec_epoch_cfm = 0
print("Start training...")
# Load the training data (.p); Note that "dataframe" is an instance
dataframe_tr = load(traindata_path, batch_size_tr)
num_batch_tr = dataframe_tr.n_batch
# Load the validation data (.p)
dataframe_valid = load(validdata_path, batch_size_val)
num_batch_valid = dataframe_valid.n_batch
# Loop; iter = epoch
for epoch in range(n_epoch):
# Variables for calculating average error and average accuracy
aver_train_err = 0
aver_train_acc = 0
# bn_is_training
vgg.bn_is_training = True
# Mini-batch training
for i in range(num_batch_tr):
batch_x, batch_y = dataframe_tr.next_batch()
err, acc, _ = sess.run([loss, accuracy, train_op],
feed_dict={vgg.imgs: batch_x, y: batch_y})
aver_train_err += err
aver_train_acc += acc
aver_train_err = aver_train_err / num_batch_tr
aver_train_acc = aver_train_acc / num_batch_tr
print("epoch:", epoch, "av_tr_err:", aver_train_err, "av_tr_acc:", aver_train_acc)
# Variables for calculating average-error and average-accuracy
aver_valid_err = 0
aver_valid_acc = 0
cfm = np.zeros([n_category, n_category])
import pyvista as pv
import numpy as np
import vtk
from .sub_mesh import *
from .colormap_c2c import *
R_earth = 6371.0e3
#
#
#___CREATE PYVISTA OCEAN MESH___________________________________________________
# make potatoefication of Earth radius
def create_3d_ocean_mesh(mesh, data, potatoefac=0.5,variable='elevation', do_texture=False):
print(' --> compute 3d ocean mesh')
#___________________________________________________________________________
# do topographic potatoefication of ocean mesh
R_grid= R_earth
bottom_depth_2d = -mesh.n_z;
bottom_depth_2d[mesh.n_i==1]=0.0;
R_grid = R_grid-( bottom_depth_2d*100*potatoefac)
R_grid[mesh.n_i==1]=R_earth;
#___________________________________________________________________________
# create spherical ocean coordinates
xs,ys,zs = grid_cart3d(mesh.n_x, mesh.n_y, R_grid, is_deg=True)
points = np.column_stack([xs,ys,zs])
del xs,ys,zs
#___________________________________________________________________________
# Each cell in the cell array needs to include the size of the cell
# and the points belonging to the cell. Here every cell is a triangle,
# so each entry is [3, i0, i1, i2] and there are mesh.n2de of them.
# cell_size = np.ones(mesh.n2dea, dtype=np.uint8)*3
cell_size = np.ones(mesh.n2de, dtype=np.uint8)*3
# cells = np.column_stack([cell_size, mesh.elem_2d_i])
cells = np.column_stack([cell_size, mesh.e_i])
cells = cells.ravel()
del cell_size
# each cell is a VTK_TRIANGLE
# celltypes = np.empty(mesh.n2dea, dtype=np.uint8)
celltypes = np.empty(mesh.n2de, dtype=np.uint8)
celltypes[:] = vtk.VTK_TRIANGLE
# the offset array points to the start of each cell (via flat indexing)
# offset = np.arange(0,4*mesh.n2dea,4, dtype=np.uint32 )
# offset = np.arange(0,4*mesh.n2de,4, dtype=np.uint32 )
#___________________________________________________________________________
# create pyvista unstructured mesh object
# meshpv_ocean = pv.UnstructuredGrid(offset, cells, celltypes, points)
meshpv_ocean = pv.UnstructuredGrid(cells, celltypes, points)
#___________________________________________________________________________
# add variables to ocean mesh
vname = list(data.keys())[0]
if not any(x in vname for x in ['depth','topo','topography','zcoord','bathymetry']):
meshpv_ocean['topo'] = -mesh.n_z
meshpv_ocean[vname] = data[vname].values
del cells, celltypes
#___do texture coordinates__________________________________________________
# Initialize the texture coordinates array
if do_texture:
meshpv_ocean.active_t_coords = np.zeros((points.shape[0], 2))
xs, ys, zs = grid_cart3d(mesh.n_x+90, mesh.n_y, R_grid/R_earth, is_deg=True)
meshpv_ocean.active_t_coords[:,0]= 0.5 + np.arctan2(-xs,ys)/(2 * np.pi)
meshpv_ocean.active_t_coords[:,1]= 0.5 + np.arcsin(zs)/np.pi
del xs, ys, zs
del points
#___________________________________________________________________________
return meshpv_ocean
#
#
#___CREATE PYVISTA LAND MESH TO FILL HOLES______________________________________
def create_3d_land_mesh(mesh, resol=1, potatoefac=1, do_topo=False, topo_path=[],
topo_varname='topo', topo_dimname=['lon','lat'], do_texture=True):
print(' --> compute 3d land mesh')
from matplotlib.path import Path
from matplotlib.tri import Triangulation
#___________________________________________________________________________
# cycle over all land polygons
for niland, lsmask in enumerate(mesh.lsmask_a):
poly_x, poly_y = lsmask[:,0], lsmask[:,1]
xmin, xmax = np.floor(poly_x).min(), np.ceil(poly_x).max()
ymin, ymax = np.floor(poly_y).min(), np.ceil(poly_y).max()
#resol = 1
x_m, y_m = np.meshgrid(np.arange(xmin, xmax, resol),np.arange(ymin, ymax, resol))
x_m, y_m = x_m.reshape((x_m.size, 1)), y_m.reshape((y_m.size, 1))
#_______________________________________________________________________
# check if regular points are within polygon
IN = Path(lsmask).contains_points(np.concatenate((x_m, y_m),axis=1))
x_m, y_m = x_m[IN==True], y_m[IN==True]
del IN
#_______________________________________________________________________
# combine polygon points and regular points within polygon --> do triangulation
outeredge = np.vstack((poly_x, poly_y)).transpose()
points = np.hstack((x_m, y_m))
points = np.vstack((outeredge,points))
tri = Triangulation(points[:,0], points[:,1])
del outeredge, poly_x, poly_y
#_______________________________________________________________________
# compute triangle centroids and check if they are within polygon
tri_cx = np.sum(points[tri.triangles,0],axis=1)/3
tri_cy = np.sum(points[tri.triangles,1],axis=1)/3
tri_cx = np.reshape(tri_cx,(tri_cx.size,1))
tri_cy = np.reshape(tri_cy,(tri_cy.size,1))
IN = Path(lsmask).contains_points(np.concatenate((tri_cx,tri_cy),axis=1))
tri.triangles=tri.triangles[IN==True,:]
del tri_cx, tri_cy, IN
#_______________________________________________________________________
# concatenate all land triangles
if niland==0:
land_points = points
land_elem2d = tri.triangles
else:
land_elem2d = np.concatenate((land_elem2d, tri.triangles+land_points.shape[0]), axis=0)
land_points = np.concatenate((land_points, points), axis=0)
del points
#___________________________________________________________________________
# do topographic scaling (potatoefication) for land mesh
R_grid = R_earth
if do_topo:
from netCDF4 import Dataset
from scipy.interpolate import griddata
fid = Dataset(topo_path,'r')
topo = fid.variables[topo_varname][:]
topo[topo<0]=0.0
lon = fid.variables[topo_dimname[0]][:]
lat = fid.variables[topo_dimname[1]][:]
fid.close()
mlon,mlat=np.meshgrid(lon,lat)
bottom_depth_2d = griddata( np.transpose( (mlon.flatten(),mlat.flatten() ) ), topo.flatten(), land_points, method='linear')
R_grid = R_grid+( bottom_depth_2d*100*potatoefac)
del topo,lon,lat,mlon,mlat
#___________________________________________________________________________
# create spherical land coordinates
xs,ys,zs = grid_cart3d(land_points[:,0], land_points[:,1], R_grid, is_deg=True)
points = np.column_stack([xs,ys,zs])
del xs,ys,zs
#___________________________________________________________________________
# Each cell in the cell array needs to include the size of the cell
# and the points belonging to the cell.
cell_size = np.ones(land_elem2d.shape[0], dtype=np.uint8)*3
cells = np.column_stack([cell_size, land_elem2d])
cells = cells.ravel()
# each cell is a VTK_TRIANGLE
celltypes = np.empty(land_elem2d.shape[0], dtype=np.uint8)
celltypes[:] = vtk.VTK_TRIANGLE
## the offset array points to the start of each cell (via flat indexing)
#offset = np.arange(0, 4*land_elem2d.shape[0], 4, dtype=np.uint32 )
#___________________________________________________________________________
# create pyvista unstructured mesh object
meshpv_land = pv.UnstructuredGrid(cells, celltypes, points)
# meshpv_land = pv.UnstructuredGrid(offset, cells, celltypes, points)
#del offset, cells, celltypes, points
del cells, celltypes
#___do texture coordinates__________________________________________________
# Initialize the texture coordinates array
if do_texture:
meshpv_land.active_t_coords = np.zeros((points.shape[0], 2))
xs, ys, zs = grid_cart3d(land_points[:,0]+90, land_points[:,1], R_grid/R_earth, is_deg=True)
meshpv_land.active_t_coords[:,0] = 0.5 + np.arctan2(-xs,ys)/(2 * np.pi)
meshpv_land.active_t_coords[:,1] = 0.5 + np.arcsin(zs)/np.pi
del xs, ys, zs
del points
#___________________________________________________________________________
# add land topography data to pyvista mesh object
if do_topo:
meshpv_land['topo'] = bottom_depth_2d
print(bottom_depth_2d.shape)
del bottom_depth_2d
#___________________________________________________________________________
return meshpv_land
#
#
#___CREATE PYVISTA 3d COASTLINE_________________________________________________
def create_3d_coastline(mesh):
print(' --> compute 3d coastline')
points_coast = list()
for niland, lsmask in enumerate(mesh.lsmask_a):
xs,ys,zs = grid_cart3d(lsmask[:,0], lsmask[:,1], R_earth*1.001, is_deg=True)
points = np.vstack((xs,ys,zs)).transpose()
points = np.row_stack((points,points[1,:]))
aux_points = np.column_stack((points[:-1,:],points[1:,:]))
aux_points = np.stack((points[:-1,:],points[1:,:]), axis=2)
aux_points = np.moveaxis(aux_points,0,1)
aux_points = aux_points.reshape(3,2*aux_points.shape[1]).transpose()
points_coast.append(aux_points)
del aux_points, points
return points_coast
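# Typical usage (sketch, assuming a loaded fesom mesh/data pair as in the functions above):
#   meshpv_ocean = create_3d_ocean_mesh(mesh, data)
#   meshpv_land  = create_3d_land_mesh(mesh, do_topo=False)
#   plotter = pv.Plotter()
#   plotter.add_mesh(meshpv_ocean, scalars=list(data.keys())[0])
#   plotter.add_mesh(meshpv_land)
#   for line in create_3d_coastline(mesh): plotter.add_lines(line)
#   plotter.show()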
#
#
#___CREATE PYVISTA 3D LONGITUDE GRID____________________________________________
def create_3d_lonlat_grid(dlon=30,dlat=15,potatoefac=1.0,do_topo=False):
points_lonlat_grid = list()
print(' --> compute 3d longitude grid')
grid_lon = np.arange(-180,180,dlon)
dum_lat = np.arange(-85,85+1,1)
for nlonline in range(0,len(grid_lon)):
if do_topo: xs,ys,zs = grid_cart3d(dum_lat*0+grid_lon[nlonline], dum_lat, R_earth+(6000*100*potatoefac), is_deg=True)
else : xs,ys,zs = grid_cart3d(dum_lat*0+grid_lon[nlonline], dum_lat, R_earth*1.005 , is_deg=True)
points = np.vstack((xs,ys,zs)).transpose()
aux_points = np.column_stack((points[:-1,:],points[1:,:]))
aux_points = np.stack((points[:-1,:],points[1:,:]), axis=2)
aux_points = | np.moveaxis(aux_points,0,1) | numpy.moveaxis |
from Bio.PDB.Atom import Atom
from Bio.PDB.PDBIO import PDBIO
from Bio.PDB import PDBParser, Polypeptide
from Bio import SVDSuperimposer
import numpy as np
import os
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
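# Approximate van der Waals radii (in Angstrom) per residue and heavy atom, used by the
# clash score in select_best_rotemer_based_on_clashes below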
VW_RADII = {
"ALA": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0
},
"CYS": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"SG": 1.8
},
"ASP": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"OD1": 1.5,
"OD2": 1.5
},
"GLU": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 1.7,
"OE1": 1.5,
"OE2": 1.5
},
"PHE": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"CD1": 1.9,
"CD2": 1.9,
"CE1": 1.9,
"CE2": 1.9,
"CZ": 1.9
},
"GLY": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4
},
"HIS": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"ND1": 1.7,
"CD2": 1.9,
"CE1": 1.9,
"NE2": 1.7
},
"ILE": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG1": 2.0,
"CG2": 2.0,
"CD1": 2.0
},
"LYS": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 2.0,
"CE": 2.0,
"NZ": 2.0
},
"LEU": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD1": 2.0,
"CD2": 2.0
},
"MET": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"SD": 1.8,
"CE": 2.0
},
"ASN": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"OD1": 1.6,
"ND2": 1.6
},
"PRO": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 2.0
},
"GLN": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 1.7,
"OE1": 1.6,
"NE2": 1.6
},
"ARG": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 2.0,
"NE": 1.7,
"CZ": 2.0,
"NH1": 2.0,
"NH2": 2.0
},
"SER": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"OG": 1.6
},
"THR": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"OG1": 1.6,
"CG2": 2.0
},
"VAL": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG1": 2.0,
"CG2": 2.0
},
"TRP": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"CD1": 1.9,
"CD2": 1.7,
"NE1": 1.7,
"CE2": 1.7,
"CE3": 1.9,
"CZ2": 1.9,
"CZ3": 1.9,
"CH2": 1.9
},
"TYR": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"CD1": 1.9,
"CD2": 1.9,
"CE1": 1.9,
"CE2": 1.9,
"CZ": 1.7,
"OH": 1.6
}
}
CHI_ANGLES = {"CHI1": {'CYS': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'SG']},
'ASP': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'SER': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'OG']},
'GLN': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'LYS': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'ILE': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG1']},
'PRO': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'THR': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'OG1']},
'PHE': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'ASN': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'HIS': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'LEU': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'ARG': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'TRP': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'VAL': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG1']},
'GLU': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'TYR': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'MET': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']}},
"CHI2": {
'ASP': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'OD1']},
'GLN': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'LYS': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'ILE': {'axis': ['CB', 'CG1'], 'ref_plane': ['CA', 'CB', 'CG1', 'CD1']},
'PRO': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'PHE': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'ASN': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'OD1']},
'HIS': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'ND1']},
'LEU': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'ARG': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'TRP': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'GLU': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'TYR': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'MET': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'SD']},
},
"CHI3": {
'ARG': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'NE']},
'GLN': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'OE1']},
'GLU': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'OE1']},
'LYS': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'CE']},
'MET': {'axis': ['CG', 'SD'], 'ref_plane': ['CB', 'CG', 'SD', 'CE']},
},
"CHI4": {
'ARG': {'axis': ['CD', 'NE'], 'ref_plane': ['CG', 'CD', 'NE', 'CZ']},
'LYS': {'axis': ['CG', 'CE'], 'ref_plane': ['CG', 'CD', 'CE', 'NZ']},
}
}
RESIDUE_ORDER = {'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG'],
'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2'],
'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG'],
'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE2', 'OE1'],
'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ'],
'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1'],
'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD'],
'THR': ['N', 'CA', 'C', 'O', 'CB', 'CG2', 'OG1'],
'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ'],
'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND2', 'OD1'],
'GLY': ['N', 'CA', 'C', 'O'],
'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD2', 'ND1', 'CE1', 'NE2'],
'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2'],
'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'NE1', 'CZ2', 'CZ3', 'CH2'],
'ALA': ['N', 'CA', 'C', 'O', 'CB'],
'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2'],
'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2'],
'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH'],
'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE']}
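# load_rotamers parses a whitespace-separated rotamer library: columns 0-2 give the residue
# name and two integer backbone-angle bins (interpreted as phi/psi), column 8 the rotamer
# probability, and columns 9-12 the CHI1-CHI4 angles; any remaining columns are ignored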
def load_rotamers(rotamer_loc="{}/rotamers.lib".format(DATA_DIR)):
_dunbrack = {}
with open(rotamer_loc) as fn:
for line in fn:
if line.startswith("#"):
continue
if not line.split()[0] in _dunbrack:
_dunbrack[line.split()[0]] = {}
if not int(line.split()[1]) in _dunbrack[line.split()[0]]:
_dunbrack[line.split()[0]][int(line.split()[1])] = {}
if not int(line.split()[2]) in _dunbrack[line.split()[0]][int(line.split()[1])]:
_dunbrack[line.split()[0]][int(line.split()[1])][int(line.split()[2])] = []
_dunbrack[line.split()[0]][int(line.split()[1])][int(line.split()[2])].append({
'prob': float(line.split()[8]),
'CHI1': float(line.split()[9]),
'CHI2': float(line.split()[10]),
'CHI3': float(line.split()[11]),
'CHI4': float(line.split()[12])
})
return _dunbrack
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
from scipy.linalg import expm, norm
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
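# Sanity check: a 90-degree counterclockwise rotation about the z-axis maps the x unit
# vector onto the y unit vector:
#   R = rotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
#   np.allclose(R.dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])   # -> True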
def dihedral_from_vectors(v1, v2, v3, v4):
"""Praxeolitic formula
1 sqrt, 1 cross product"""
b0 = -1.0 * (v2 - v1)
b1 = v3 - v2
b2 = v4 - v3
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= np.linalg.norm(b1)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - np.dot(b0, b1) * b1
w = b2 - np.dot(b2, b1) * b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
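# Sanity check: for the planar points v1=(1,0,0), v2=(0,0,0), v3=(0,1,0) the dihedral is
# 0 for v4=(1,1,0) (cis arrangement) and pi for v4=(-1,1,0) (trans arrangement)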
def distance(x, y):
return np.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 + (x[2] - y[2]) ** 2)
def select_best_rotemer_based_on_clashes(pdb_object, chain, res_num, mutate_to, sample_residue, rotamers):
best_rotamer = None
lowest_energy = float('inf')
for rotamer in rotamers:
vdw_energy = 0
# Introduce the rotamer
for angle in ['CHI1', 'CHI2', 'CHI3', 'CHI4']:
if mutate_to not in CHI_ANGLES[angle]:
continue
dihedral_start = dihedral_from_vectors(
*[sample_residue[x] for x in CHI_ANGLES[angle][mutate_to]['ref_plane']])
rotation_angle = dihedral_start - np.deg2rad(rotamer[angle])
axis = CHI_ANGLES[angle][mutate_to]['axis']
# print(angle)
for atom in RESIDUE_ORDER[mutate_to][RESIDUE_ORDER[mutate_to].index(axis[1]) + 1:]:
sample_residue[atom] = np.dot(
rotation_matrix(sample_residue[axis[0]] - sample_residue[axis[1]], rotation_angle),
sample_residue[atom] - sample_residue[axis[1]]) + sample_residue[axis[1]]
for rotamer_atom, rotamer_vector in sample_residue.items():
for residues in list(pdb_object[0][chain].get_residues()):
for residue_atoms in list(residues.get_atoms()):
if residues.get_id()[1] == res_num: # Skip itself
continue
# print(residues.get_id()[1], residue_atoms.get_id())
# print(residues.get_resname(), residue_atoms.coord, rotamer_atom, rotamer_vector)
dist = distance(residue_atoms.coord, rotamer_vector)
if dist > 6:
continue
try:
vdw_radi = VW_RADII[residues.get_resname()][residue_atoms.get_id()] + VW_RADII[mutate_to][rotamer_atom]
except KeyError:
continue
# print(residues.get_id()[1], residue_atoms.get_id(), rotamer_atom, dist, ((vdw_radi / dist) ** 12 - (vdw_radi / dist) ** 6))
vdw_energy += ((vdw_radi / dist) ** 12 - (vdw_radi / dist) ** 6)
# print(rotamer, vdw_energy)
# print('________________________')
if vdw_energy < lowest_energy:
lowest_energy = vdw_energy
best_rotamer = rotamer
return best_rotamer
def mutate(pdb_obj, chain, res_num, mutate_to, rotamer_lib=None, mutation_type="best"):
_residue, _residue_idx = [(x, n) for n, x in enumerate(pdb_obj[0][chain].get_residues()) if x.get_id()[1] == res_num][0]
# print(_residue)
_residue_atoms = list(_residue.get_atoms())
for atom in _residue_atoms:
if atom.name not in ['C', 'N', 'CA', 'O']:
residue = atom.parent
residue.detach_child(atom.id)
polypeptide = Polypeptide.Polypeptide(pdb_obj[0][chain])
phi, psi = polypeptide.get_phi_psi_list()[_residue_idx]
if not phi:
phi = 0
if not psi:
psi = 0
phi, psi = round(np.rad2deg(phi), -1), round(np.rad2deg(psi), -1)
# print(phi, psi)
# print(_residue['N'].coord)
sample_residue = {}
with open('{}/{}.pdb'.format(DATA_DIR, mutate_to.upper())) as fn:
for line in fn:
sample_residue[line[12:16].strip()] = np.array([float(line[30:38]), float(line[38:46]), float(line[46:54])])
starting_points = np.mat([sample_residue["N"], sample_residue["CA"], sample_residue["C"]])
end_points = | np.mat([_residue["N"].coord, _residue["CA"].coord, _residue["C"].coord]) | numpy.mat |
"""
Created in Nov. 2021
@author: <NAME> -- CALTECH
"""
import numpy as np , scipy as sp , random
from scipy.spatial import Voronoi , voronoi_plot_2d
from scipy import sparse
import matplotlib.pyplot as plt
from copy import deepcopy
from math import atan2
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from itertools import product
def Lattice_Points(XY_lens , dens , disorder , unit_cell='square' , spdim=2):
nx = int(round(XY_lens[0]*np.sqrt(dens)))
ny = int(round(XY_lens[1]*np.sqrt(dens)))
if unit_cell == 'square':
R_lst = [[i , j] for i in range(nx) for j in range(ny)]
else:
R_lst_0 = [[(1+(-1)**(j+1))/4 , j*np.sqrt(3)/2] for j in range(1,ny,2)]
R_lst_1 = [[i+(1+(-1)**(j+1))/4 , j*np.sqrt(3)/2] for i in range(1,nx) for j in range(ny)]
R_lst = R_lst_0 + R_lst_1
R_cnts = np.asarray(R_lst).astype(float)
R_cnts -= | np.mean(R_cnts,axis=0) | numpy.mean |
# Taken from https://github.com/psclklnk/spdl and wrapped to our architecture
# Modified by <NAME>, copy of the license at TeachMyAgent/teachers/LICENSES/SPDL
import torch
import numpy as np
from copy import deepcopy
from functools import partial
from TeachMyAgent.teachers.algos.AbstractTeacher import AbstractTeacher
from TeachMyAgent.teachers.utils.conjugate_gradient import cg_step
from TeachMyAgent.teachers.utils.torch import to_float_tensor
from TeachMyAgent.teachers.utils.gaussian_torch_distribution import GaussianTorchDistribution
class Buffer:
def __init__(self, n_elements, max_buffer_size, reset_on_query):
self.reset_on_query = reset_on_query
self.max_buffer_size = max_buffer_size
self.buffers = [list() for i in range(0, n_elements)]
def update_buffer(self, datas):
if isinstance(datas[0], list):
for buffer, data in zip(self.buffers, datas):
buffer.extend(data)
else:
for buffer, data in zip(self.buffers, datas):
buffer.append(data)
while len(self.buffers[0]) > self.max_buffer_size:
for buffer in self.buffers:
del buffer[0]
def read_buffer(self, reset=None):
if reset is None:
reset = self.reset_on_query
res = tuple([buffer for buffer in self.buffers])
if reset:
for i in range(0, len(self.buffers)):
self.buffers[i] = []
return res
def __len__(self):
return len(self.buffers[0])
class AbstractSelfPacedTeacher():
'''
Base SPDL Teacher
'''
def __init__(self, init_mean, flat_init_chol, target_mean, flat_target_chol, alpha_function, max_kl, cg_parameters):
self.context_dist = GaussianTorchDistribution(init_mean, flat_init_chol, use_cuda=False)
self.target_dist = GaussianTorchDistribution(target_mean, flat_target_chol, use_cuda=False)
self.alpha_function = alpha_function
self.max_kl = max_kl
self.cg_parameters = {"n_epochs_line_search": 10, "n_epochs_cg": 10, "cg_damping": 1e-2,
"cg_residual_tol": 1e-10}
if cg_parameters is not None:
self.cg_parameters.update(cg_parameters)
self.task = None
self.iteration = 0
def target_context_kl(self, numpy=True):
kl_div = torch.distributions.kl.kl_divergence(self.context_dist.distribution_t,
self.target_dist.distribution_t).detach()
if numpy:
kl_div = kl_div.numpy()
return kl_div
def save(self, path):
weights = self.context_dist.get_weights()
np.save(path, weights)
def load(self, path):
self.context_dist.set_weights(np.load(path))
def _compute_context_kl(self, old_context_dist):
return torch.distributions.kl.kl_divergence(old_context_dist.distribution_t, self.context_dist.distribution_t)
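# The self-paced objective below is the importance-weighted value of the sampled contexts
# under the updated distribution minus alpha times the KL-divergence to the target
# distribution, i.e. maximise E[w * V] - alpha * KL(context || target)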
def _compute_context_loss(self, cons_t, old_c_log_prob_t, c_val_t, alpha_cur_t):
con_ratio_t = torch.exp(self.context_dist.log_pdf_t(cons_t) - old_c_log_prob_t)
kl_div = torch.distributions.kl.kl_divergence(self.context_dist.distribution_t, self.target_dist.distribution_t)
return torch.mean(con_ratio_t * c_val_t) - alpha_cur_t * kl_div
class SelfPacedTeacher(AbstractTeacher, AbstractSelfPacedTeacher):
def __init__(self, mins, maxs, seed, env_reward_lb, env_reward_ub, update_frequency, update_offset, alpha_function, initial_dist=None,
target_dist=None, max_kl=0.1, std_lower_bound=None, kl_threshold=None, cg_parameters=None,
use_avg_performance=False, max_context_buffer_size=1000, reset_contexts=True, discount_factor=0.99):
'''
Self-paced Deep Reinforcement Learning (https://papers.nips.cc/paper/2020/hash/68a9750337a418a86fe06c1991a1d64c-Abstract.html).
Taken from https://github.com/psclklnk/spdl and wrapped to our architecture.
Works in a non-episodic setup, updates are thus made in the `step_update` method.
Args:
update_frequency: Update frequency of the sampling distribution (in steps)
update_offset: How many steps must be done before the starting to update the distribution
alpha_function: Function calculating the alpha parameter
initial_dist: Initial distribution to start from
target_dist: Target distribution to reach
max_kl: Maximum KL-divergence authorized between the old and new distributions when updating
std_lower_bound: Minimum std authorized on the sampling distribution if the KL-divergence between
the latter and the target distribution is greater than `kl_threshold`. Set this to
`None` if no constraint on the std should be applied
kl_threshold: Threshold enforcing the std constraint
cg_parameters: Additional parameters for the Conjugate Gradient method
use_avg_performance: Whether the alpha function must used the averaged performance
max_context_buffer_size: Maximum size of the buffer storing sampled tasks
reset_contexts: Whether the buffer should be reset when queried
discount_factor: Discount factor used in the Universal Value Function
'''
AbstractTeacher.__init__(self, mins, maxs, env_reward_lb, env_reward_ub, seed)
torch.manual_seed(self.seed)
initial_mean, initial_variance = self.get_or_create_dist(initial_dist, mins, maxs, subspace=True) # Random subspace of the task space if no initial dist
target_mean, target_variance = self.get_or_create_dist(target_dist, mins, maxs, subspace=False) # Full task space if no initial dist
context_bounds = (np.array(mins), np.array(maxs))
self.update_frequency = update_frequency
self.update_offset = update_offset
self.step_counter = 0
self.discounted_sum_reward = 0
self.discount_factor = discount_factor
self.discounted_sum_rewards = []
self.current_disc = 1
self.pending_initial_state = None
self.algorithm_iterations = 0
# The bounds that we show to the outside are limited to the interval [-1, 1], as this is typically better for
# neural nets to deal with
self.context_buffer = Buffer(2, max_context_buffer_size, reset_contexts)
self.context_dim = target_mean.shape[0]
self.context_bounds = context_bounds
self.use_avg_performance = use_avg_performance
if std_lower_bound is not None and kl_threshold is None:
raise RuntimeError("Error! Both Lower Bound on standard deviation and kl threshold need to be set")
else:
if std_lower_bound is not None:
if isinstance(std_lower_bound, np.ndarray):
if std_lower_bound.shape[0] != self.context_dim:
raise RuntimeError("Error! Wrong dimension of the standard deviation lower bound")
elif std_lower_bound is not None:
std_lower_bound = np.ones(self.context_dim) * std_lower_bound
self.std_lower_bound = std_lower_bound
self.kl_threshold = kl_threshold
# Create the initial context distribution
if isinstance(initial_variance, np.ndarray):
flat_init_chol = GaussianTorchDistribution.flatten_matrix(initial_variance, tril=False)
else:
flat_init_chol = GaussianTorchDistribution.flatten_matrix(initial_variance * | np.eye(self.context_dim) | numpy.eye |
import numpy as np
import matplotlib.pyplot as plt
from irise.plot.util import legend
from myscripts.statistics import mean_diff
from myscripts.models import speedy
from myscripts.projects.ithaca.tendencies import load_tendency
def main():
sigma = speedy.sigma_levels[1]
sbits = | np.arange(5, 24) | numpy.arange |
import datetime
import random
import time
import numpy as np
import torch
import torch.utils.data
import utils
from datasets import load_dataset, load_metric
from reprod_log import ReprodLogger
from torch import nn
from transformers import AdamW, BertTokenizer, DataCollatorWithPadding, get_scheduler
from transformers.models.bert import BertForSequenceClassification
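# Mapping from GLUE task name to the dataset column(s) holding its input sentence(s);
# None means the task uses a single input sentence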
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
def train_one_epoch(
model,
criterion,
optimizer,
lr_scheduler,
data_loader,
device,
epoch,
print_freq,
scaler=None, ):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter(
"lr", utils.SmoothedValue(
window_size=1, fmt="{value}"))
metric_logger.add_meter(
"sentence/s", utils.SmoothedValue(
window_size=10, fmt="{value}"))
header = "Epoch: [{}]".format(epoch)
for batch in metric_logger.log_every(data_loader, print_freq, header):
start_time = time.time()
batch.to(device)
labels = batch.pop("labels")
with torch.cuda.amp.autocast(enabled=scaler is not None):
logits = model(**batch)[0]
loss = criterion(
logits.reshape(-1, model.num_labels), labels.reshape(-1))
optimizer.zero_grad()
if scaler is not None:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
batch_size = batch["input_ids"].shape[0]
metric_logger.update(
loss=loss.item(), lr=lr_scheduler.get_last_lr()[-1])
metric_logger.meters["sentence/s"].update(batch_size /
(time.time() - start_time))
def evaluate(model, criterion, data_loader, device, metric, print_freq=100):
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = "Test:"
with torch.no_grad():
for batch in metric_logger.log_every(data_loader, print_freq, header):
batch.to(device)
labels = batch.pop("labels")
logits = model(**batch)[0]
loss = criterion(
logits.reshape(-1, model.num_labels), labels.reshape(-1))
metric_logger.update(loss=loss.item())
metric.add_batch(
predictions=logits.argmax(dim=-1),
references=labels, )
acc_global_avg = metric.compute()["accuracy"]
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print(" * Accuracy {acc_global_avg:.6f}".format(
acc_global_avg=acc_global_avg))
return acc_global_avg
def set_seed(seed=42):
random.seed(seed)
| np.random.seed(seed) | numpy.random.seed |
import numpy as np
import dlib
import glob
import cv2
import os
os.chdir('/media/imi-yujun/579e63e9-5852-43a7-9323-8e51241f5d3a/yujun/Course_porject_face')
def LoadBase():
predictor_path = 'Network/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
return predictor,detector
def SaveLandmark(shape):
tmp = np.zeros((68,2),dtype=np.uint)
for i in range(68):
tmp[i,0] = shape.part(i).x
tmp[i, 1] = shape.part(i).y
return tmp
def Run():
count =0
predictor, detector = LoadBase()
image_list = glob.glob('CoarseData/CoarseData/*/*.jpg')
for path_ in image_list:
image_= cv2.imread(path_)
#gray_= cv2.cvtColor(image_, cv2.COLOR_BGR2GRAY)
try:
print(count)
count += 1
# dets = detector(gray_, 1)
# shape = predictor(gray_, dets[0])
# tmp = SaveLandmark(shape)
# np.savetxt(path_[:len(path_)-4]+'_landmark.txt',tmp, fmt='%d')
# res = Normalize(image_,tmp)
# cv2.imwrite(path_[:len(path_)-4]+'_224.png',res)
#
landmark = np.loadtxt(path_[:len(path_)-4]+'_landmark.txt')
shape_, exp_, eular_, translate_, scale_ = Get3Dmm(path_[:len(path_)-4]+'.txt')
crop_image, translation, new_scale = Normalize2(image_,landmark,translate_,scale_)
cv2.imwrite(path_[:len(path_) - 4] + '_224.png', crop_image)
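# 185-dim label layout: 100 shape coefficients, 79 expression coefficients,
# 3 Euler angles, 2 translation components and 1 scale value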
label = np.zeros([185])
label[:100] = shape_
label[100:179] = exp_
label[179:182] =eular_
label[182:184] = translate_
label[184] = scale_
np.savetxt(path_[:len(path_) - 4] + '_224.txt',label)
except:
print("error")
continue
def Normalize(image,landmark_):
xmin = np.min(landmark_[:,0])
xmax = np.max(landmark_[:,0])
ymin = np.min(landmark_[:,1])
ymax = np.max(landmark_[:,1])
sub_image = image[ymin:ymax,xmin:xmax]
res = cv2.resize(sub_image, (224,224), interpolation=cv2.INTER_LINEAR)
return res
def Package():
Save_path = 'Input/'
image_list = glob.glob('CoarseData/CoarseData/*/*_224.png')
data = np.zeros((len(image_list),224,224,3),dtype=np.uint8)
label = np.zeros((len(image_list),185))
for i in range(len(image_list)):
print(i)
path_ = image_list[i]
img = cv2.imread(path_)
# f = open(path_[:len(path_)-8]+'.txt')
# content = f.readlines()
# content= [x.strip() for x in content]
# label[i,:100] = np.array(content[0].split(" "),dtype = np.float)
# label[i,100:179] = np.array(content[1].split(" "),dtype = np.float)
# label[i,179:] = np.array(content[2].split(" "),dtype = np.float)
# f.close()
label[i,:] = np.loadtxt(path_[:len(path_)-8]+'_224.txt')
data[i,:,:] = np.array(img,dtype=np.uint8)
np.save(Save_path+'data.npy',data)
np.save(Save_path+'label.npy',label)
def Split():
test_num = 5000
data = np.load('Input/data.npy')
label= np.load('Input/label.npy')
train_data= data[test_num:,:]
test_data = data[:test_num,:]
test_label = label[:test_num,:]
train_label = label[test_num:,:]
np.save('Input/train_data.npy',train_data)
np.save('Input/mean_data.npy',np.mean(data,axis=0))
np.save('Input/mean_label.npy', np.mean(label, axis=0))
np.save('Input/test_data.npy',test_data)
np.save('Input/train_label.npy',train_label)
np.save('Input/test_label.npy',test_label)
np.save('Input/std_label.npy', np.std(label, axis=0))
net_img_size = 224
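# Normalize2 rescales the image so that 1.2x the diagonal of the landmark bounding box spans
# net_img_size (224) pixels, then crops a 224x224 window with the bounding-box centre placed
# at (111.5, 89.2); the 3DMM translation and scale are updated to match the crop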
def Normalize2(image, landmark_, translation=np.array([0,0]), scale=0):
xmin = np.min(landmark_[:, 0])
xmax = np.max(landmark_[:, 0])
ymin = np.min(landmark_[:, 1])
ymax = np.max(landmark_[:, 1])
old_cx = (xmin + xmax) / 2
old_cy = (ymin + ymax) / 2;
cx = (net_img_size - 1) / 2.0
cy = (net_img_size - 1) * 2.0 / 5.0;
length = ((xmax - xmin) ** 2 + (ymax - ymin) ** 2) ** 0.5
length *= 1.2
ori_crop_scale = net_img_size / length
new_scale = scale * ori_crop_scale
image = cv2.resize(image, (0, 0), fx=ori_crop_scale, fy=ori_crop_scale)
old_cx = old_cx * ori_crop_scale
old_cy = old_cy * ori_crop_scale
start_x = int(old_cx - cx)
start_y = int(old_cy - cy)
crop_image = image[start_y:start_y + 224, start_x:start_x + 224]
shape_ = np.shape(crop_image)
tmp = np.zeros((224,224,3),dtype=np.uint8)
tmp[:shape_[0],:shape_[1],:] = crop_image
translation = translation * ori_crop_scale
translation[0] = translation[0] - start_x
translation[1] = translation[1] - (len(image) - 224-start_y)
# landmark_=landmark_*ori_crop_scale
# tmp = np.zeros((224,224),dtype=np.uint8)
# for i in range(68):
# tmp[ int(landmark_[i,1] - start_y),int(landmark_[i,0] - start_x) ] = 255;
# cv2.imwrite("landmarl.jpg",tmp)
return tmp, translation, new_scale
def Get3Dmm(path):
with open(path) as f:
dmm_para = f.readlines()
dmm_para = [x.strip() for x in dmm_para]
shape_ = np.array(dmm_para[0].split(), dtype=np.float)
exp_ = np.array(dmm_para[1].split(), dtype=np.float)
tmp = np.array(dmm_para[2].split(), dtype=np.float)
eular_ = tmp[:3]
translate_ = tmp[3:5]
scale_ = tmp[5]
return shape_,exp_,eular_,translate_,scale_
def Custom():
cap = cv2.VideoCapture('Input/gx.MOV')
sample_num = 150
M = cv2.getRotationMatrix2D((1920 / 2, 1080 / 2), 270, 1)
predictor, detector = LoadBase()
index_=0
data = | np.zeros((sample_num,224,224,3)) | numpy.zeros |
"""
Test the ColumnTransformer.
"""
import numpy as np
from scipy import sparse
import pytest
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.base import BaseEstimator
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.feature_extraction import DictVectorizer
class Trans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# 1D Series -> 2D DataFrame
if hasattr(X, 'to_frame'):
return X.to_frame()
# 1D array -> 2D array
if X.ndim == 1:
return np.atleast_2d(X).T
return X
class SparseMatrixTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
n_samples = len(X)
return sparse.eye(n_samples, n_samples).tocsr()
def test_column_transformer():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first1D = np.array([0, 1, 2])
X_res_second1D = np.array([2, 4, 6])
X_res_first = X_res_first1D.reshape(-1, 1)
X_res_both = X_array
cases = [
# single column 1D / 2D
(0, X_res_first),
([0], X_res_first),
# list-like
([0, 1], X_res_both),
(np.array([0, 1]), X_res_both),
# slice
(slice(0, 1), X_res_first),
(slice(0, 2), X_res_both),
# boolean mask
(np.array([True, False]), X_res_first),
]
for selection, res in cases:
ct = ColumnTransformer([('trans', Trans(), selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), res)
assert_array_equal(ct.fit(X_array).transform(X_array), res)
ct = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])])
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
# test with transformer_weights
transformer_weights = {'trans1': .1, 'trans2': 10}
both = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])],
transformer_weights=transformer_weights)
res = np.vstack([transformer_weights['trans1'] * X_res_first1D,
transformer_weights['trans2'] * X_res_second1D]).T
assert_array_equal(both.fit_transform(X_array), res)
assert_array_equal(both.fit(X_array).transform(X_array), res)
both = ColumnTransformer([('trans', Trans(), [0, 1])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
def test_column_transformer_dataframe():
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
X_res_both = X_array
cases = [
# String keys: label based
# scalar
('first', X_res_first),
# list
(['first'], X_res_first),
(['first', 'second'], X_res_both),
# slice
(slice('first', 'second'), X_res_both),
# int keys: positional
# scalar
(0, X_res_first),
# list
([0], X_res_first),
([0, 1], X_res_both),
(np.array([0, 1]), X_res_both),
# slice
(slice(0, 1), X_res_first),
(slice(0, 2), X_res_both),
# boolean mask
(np.array([True, False]), X_res_first),
(pd.Series([True, False], index=['first', 'second']), X_res_first),
]
for selection, res in cases:
ct = ColumnTransformer([('trans', Trans(), selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), res)
assert_array_equal(ct.fit(X_df).transform(X_df), res)
ct = ColumnTransformer([('trans1', Trans(), ['first']),
('trans2', Trans(), ['second'])])
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
ct = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])])
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
# test with transformer_weights
transformer_weights = {'trans1': .1, 'trans2': 10}
both = ColumnTransformer([('trans1', Trans(), ['first']),
('trans2', Trans(), ['second'])],
transformer_weights=transformer_weights)
res = np.vstack([transformer_weights['trans1'] * X_df['first'],
transformer_weights['trans2'] * X_df['second']]).T
assert_array_equal(both.fit_transform(X_df), res)
assert_array_equal(both.fit(X_df).transform(X_df), res)
# test multiple columns
both = ColumnTransformer([('trans', Trans(), ['first', 'second'])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
both = ColumnTransformer([('trans', Trans(), [0, 1])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
# ensure pandas object is passed through
class TransAssert(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
assert_true(isinstance(X, (pd.DataFrame, pd.Series)))
if isinstance(X, pd.Series):
X = X.to_frame()
return X
ct = ColumnTransformer([('trans', TransAssert(), 'first')],
remainder='drop')
ct.fit_transform(X_df)
ct = ColumnTransformer([('trans', TransAssert(), ['first', 'second'])])
ct.fit_transform(X_df)
# integer column spec + integer column names -> still use positional
X_df2 = X_df.copy()
X_df2.columns = [1, 0]
ct = ColumnTransformer([('trans', Trans(), 0)], remainder='drop')
assert_array_equal(ct.fit_transform(X_df), X_res_first)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)
def test_column_transformer_sparse_array():
X_sparse = sparse.eye(3, 2).tocsr()
# no distinction between 1D and 2D
X_res_first = X_sparse[:, 0]
X_res_both = X_sparse
for col in [0, [0], slice(0, 1)]:
for remainder, res in [('drop', X_res_first),
('passthrough', X_res_both)]:
ct = ColumnTransformer([('trans', Trans(), col)],
remainder=remainder)
assert_true(sparse.issparse(ct.fit_transform(X_sparse)))
assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
res)
for col in [[0, 1], slice(0, 2)]:
ct = ColumnTransformer([('trans', Trans(), col)])
assert_true(sparse.issparse(ct.fit_transform(X_sparse)))
assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
X_res_both)
def test_column_transformer_sparse_stacking():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
col_trans = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', SparseMatrixTrans(), 1)])
col_trans.fit(X_array)
X_trans = col_trans.transform(X_array)
assert_true(sparse.issparse(X_trans))
assert_equal(X_trans.shape, (X_trans.shape[0], X_trans.shape[0] + 1))
assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_error_msg_1D():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
col_trans = ColumnTransformer([('trans', StandardScaler(), 0)])
assert_raise_message(ValueError, "1D data passed to a transformer",
col_trans.fit, X_array)
assert_raise_message(ValueError, "1D data passed to a transformer",
col_trans.fit_transform, X_array)
class TransRaise(BaseEstimator):
def fit(self, X, y=None):
raise ValueError("specific message")
def transform(self, X, y=None):
raise ValueError("specific message")
col_trans = ColumnTransformer([('trans', TransRaise(), 0)])
for func in [col_trans.fit, col_trans.fit_transform]:
assert_raise_message(ValueError, "specific message", func, X_array)
def test_2D_transformer_output():
class TransNo2D(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
X_array = | np.array([[0, 1, 2], [2, 4, 6]]) | numpy.array |
""" Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Sample demonstration of hyperparameter tuning of a swap gate on 2 qubits of a 4-qubit system, a more complicated example than time_basis_cnot.
This example uses the sinusoidal basis and varies the control power level and control bandwidth in addition to the control length.
It also uses an external optimizer, SciPy's L-BFGS-B, a second-order gradient-based optimizer with simple bound constraints, for much faster and better convergence on complex pulses.
It saves the results in graphical format and in an NMR machine-parsable format.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import datetime
import pathlib
from basisgen import sinusoidal_basis_gen
from padqoc import PADQOC
from scipy.optimize import minimize,Bounds
#Pauli Matrices
px = np.array([[0,1+0j],[1+0j,0]])
py = np.array([[0,-1j],[1j,0]])
pz = np.array([[1+0j,0],[0,-1+0j]])
pi = np.array([[1+0j,0],[0,1+0j]])
pi2 = np.kron(pi,pi)
pi3 = np.kron(pi,pi2)
pi4 = np.kron(pi2,pi2)
#Pauli Matrices Generator
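# e.g. a Pauli-X on the second qubit of a 4-qubit register is np.kron(pi, np.kron(px, pi2));
# pcat builds such operators from a label string such as 'ixii' (assuming one branch per
# Pauli label analogous to the 'i' branch)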
def pcat(s):
cs = 1
for c in s:
if(c=='i'):
cs = | np.kron(cs,pi) | numpy.kron |
import itertools
import os
import re
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
import pytest
from sklearn.base import clone
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.metrics import get_scorer
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.utils import compute_class_weight, _IS_32BIT
from sklearn.utils._testing import ignore_warnings
from sklearn.utils import shuffle
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import scale
from sklearn.utils._testing import skip_if_no_parallel
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._logistic import (
_log_reg_scoring_path,
_logistic_regression_path,
LogisticRegression,
LogisticRegressionCV,
)
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sparse.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
with pytest.raises(ValueError, match=msg):
LogisticRegression(C=-1).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LogisticRegression(C="test").fit(X, Y1)
msg = "is not a valid scoring value"
with pytest.raises(ValueError, match=msg):
LogisticRegressionCV(scoring="bad-scorer", cv=2).fit(X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
with pytest.raises(ValueError, match=msg):
LR(tol=-1).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LR(tol="test").fit(X, Y1)
msg = "Maximum number of iteration must be positive"
with pytest.raises(ValueError, match=msg):
LR(max_iter=-1).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LR(max_iter="test").fit(X, Y1)
def test_logistic_cv_mock_scorer():
class MockScorer:
def __init__(self):
self.calls = 0
self.scores = [0.1, 0.4, 0.8, 0.5]
def __call__(self, model, X, y, sample_weight=None):
score = self.scores[self.calls % len(self.scores)]
self.calls += 1
return score
mock_scorer = MockScorer()
Cs = [1, 2, 3, 4]
cv = 2
lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv)
lr.fit(X, Y1)
# Cs[2] has the highest score (0.8) from MockScorer
assert lr.C_[0] == Cs[2]
# scorer called 8 times (cv*len(Cs))
assert mock_scorer.calls == cv * len(Cs)
# reset mock_scorer
mock_scorer.calls = 0
custom_score = lr.score(X, lr.predict(X))
assert custom_score == mock_scorer.scores[0]
assert mock_scorer.calls == 1
@skip_if_no_parallel
def test_lr_liblinear_warning():
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
lr = LogisticRegression(solver="liblinear", n_jobs=2)
warning_message = (
"'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = 2."
)
with pytest.warns(UserWarning, match=warning_message):
lr.fit(iris.data, target)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [
LogisticRegression(C=len(iris.data), solver="liblinear", multi_class="ovr"),
LogisticRegression(C=len(iris.data), solver="lbfgs", multi_class="multinomial"),
LogisticRegression(
C=len(iris.data), solver="newton-cg", multi_class="multinomial"
),
LogisticRegression(
C=len(iris.data), solver="sag", tol=1e-2, multi_class="ovr", random_state=42
),
LogisticRegression(
C=len(iris.data),
solver="saga",
tol=1e-2,
multi_class="ovr",
random_state=42,
),
]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert np.mean(pred == target) > 0.95
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert np.mean(pred == target) > 0.95
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"])
def test_multinomial_validation(solver):
lr = LogisticRegression(C=-1, solver=solver, multi_class="multinomial")
with pytest.raises(ValueError):
lr.fit([[0, 1], [1, 0]], [0, 1])
@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
def test_check_solver_option(LR):
X, y = iris.data, iris.target
msg = (
r"Logistic Regression supports only solvers in \['liblinear', "
r"'newton-cg', 'lbfgs', 'sag', 'saga'\], got wrong_name."
)
lr = LR(solver="wrong_name", multi_class="ovr")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
msg = "multi_class should be 'multinomial', 'ovr' or 'auto'. Got wrong_name"
lr = LR(solver="newton-cg", multi_class="wrong_name")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver="liblinear", multi_class="multinomial")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# all solvers except 'liblinear' and 'saga'
for solver in ["newton-cg", "lbfgs", "sag"]:
msg = "Solver %s supports only 'l2' or 'none' penalties," % solver
lr = LR(solver=solver, penalty="l1", multi_class="ovr")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
for solver in ["newton-cg", "lbfgs", "sag", "saga"]:
msg = "Solver %s supports only dual=False, got dual=True" % solver
lr = LR(solver=solver, dual=True, multi_class="ovr")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# only saga supports elasticnet. We only test for liblinear because the
# error is raised before for the other solvers (solver %s supports only l2
# penalties)
for solver in ["liblinear"]:
msg = "Only 'saga' solver supports elasticnet penalty, got solver={}.".format(
solver
)
lr = LR(solver=solver, penalty="elasticnet")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# liblinear does not support penalty='none'
msg = "penalty='none' is not supported for the liblinear solver"
lr = LR(penalty="none", solver="liblinear")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"])
def test_multinomial_binary(solver):
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
clf = LogisticRegression(
solver=solver, multi_class="multinomial", random_state=42, max_iter=2000
)
clf.fit(iris.data, target)
assert clf.coef_.shape == (1, iris.data.shape[1])
assert clf.intercept_.shape == (1,)
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(
solver=solver, multi_class="multinomial", random_state=42, fit_intercept=False
)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)]
assert np.mean(pred == target) > 0.9
def test_multinomial_binary_probabilities():
# Test multinomial LR gives expected probabilities based on the
# decision function, for a binary problem.
X, y = make_classification()
clf = LogisticRegression(multi_class="multinomial", solver="saga")
clf.fit(X, y)
decision = clf.decision_function(X)
proba = clf.predict_proba(X)
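# With the symmetric multinomial parameterisation the two class scores are -decision and
# +decision, so softmax gives P(class 1) = exp(d) / (exp(d) + exp(-d)) (= sigmoid(2d))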
expected_proba_class_1 = np.exp(decision) / (np.exp(decision) + np.exp(-decision))
expected_proba = np.c_[1 - expected_proba_class_1, expected_proba_class_1]
assert_almost_equal(proba, expected_proba)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert sparse.issparse(clf.coef_)
pred_s_d = clf.decision_function(iris.data)
sp_data = sparse.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
with pytest.raises(ValueError):
clf.fit(X, y_wrong)
# Wrong dimensions for test data
with pytest.raises(ValueError):
clf.fit(X_, y_).predict(rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
logistic = LogisticRegression(random_state=0)
with pytest.raises(ValueError):
logistic.fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ["sag", "saga"]:
coefs, Cs, _ = f(_logistic_regression_path)(
X,
y,
Cs=Cs,
fit_intercept=False,
tol=1e-5,
solver=solver,
max_iter=1000,
multi_class="ovr",
random_state=0,
)
for i, C in enumerate(Cs):
lr = LogisticRegression(
C=C,
fit_intercept=False,
tol=1e-5,
solver=solver,
multi_class="ovr",
random_state=0,
max_iter=1000,
)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(
lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver
)
# test for fit_intercept=True
for solver in ("lbfgs", "newton-cg", "liblinear", "sag", "saga"):
Cs = [1e3]
coefs, Cs, _ = f(_logistic_regression_path)(
X,
y,
Cs=Cs,
tol=1e-6,
solver=solver,
intercept_scaling=10000.0,
random_state=0,
multi_class="ovr",
)
lr = LogisticRegression(
C=Cs[0],
tol=1e-4,
intercept_scaling=10000.0,
random_state=0,
multi_class="ovr",
solver=solver,
)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(
lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver
)
def test_logistic_regression_path_convergence_fail():
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = [1e3]
# Check that the convergence message points to both a model agnostic
# advice (scaling the data) and to the logistic regression specific
# documentation that includes hints on the solver configuration.
with pytest.warns(ConvergenceWarning) as record:
_logistic_regression_path(
X, y, Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0
)
assert len(record) == 1
warn_msg = record[0].message.args[0]
assert "lbfgs failed to converge" in warn_msg
assert "Increase the number of iterations" in warn_msg
assert "scale the data" in warn_msg
assert "linear_model.html#logistic-regression" in warn_msg
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20, random_state=0)
lr1 = LogisticRegression(
random_state=0,
dual=True,
max_iter=1,
tol=1e-15,
solver="liblinear",
multi_class="ovr",
)
lr1.fit(X, y)
lr2 = LogisticRegression(
random_state=0,
dual=True,
max_iter=1,
tol=1e-15,
solver="liblinear",
multi_class="ovr",
)
lr2.fit(X, y)
lr3 = LogisticRegression(
random_state=8,
dual=True,
max_iter=1,
tol=1e-15,
solver="liblinear",
multi_class="ovr",
)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
with pytest.raises(AssertionError, match=msg):
assert_array_almost_equal(lr1.coef_, lr3.coef_)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(
Cs=[1.0], fit_intercept=False, solver="liblinear", multi_class="ovr", cv=3
)
lr_cv.fit(X_ref, y)
lr = LogisticRegression(
C=1.0, fit_intercept=False, solver="liblinear", multi_class="ovr"
)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert len(lr_cv.classes_) == 2
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1,))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
@pytest.mark.parametrize(
"scoring, multiclass_agg_list",
[
("accuracy", [""]),
("precision", ["_macro", "_weighted"]),
# no need to test for micro averaging because it
# is the same as accuracy for f1, precision,
# and recall (see https://github.com/
# scikit-learn/scikit-learn/pull/
# 11578#discussion_r203250062)
("f1", ["_macro", "_weighted"]),
("neg_log_loss", [""]),
("recall", ["_macro", "_weighted"]),
],
)
def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list):
# test that LogisticRegressionCV uses the right score to compute its
# cross-validation scores when using a multinomial scoring
# see https://github.com/scikit-learn/scikit-learn/issues/8720
X, y = make_classification(
n_samples=100, random_state=0, n_classes=3, n_informative=6
)
train, test = np.arange(80), np.arange(80, 100)
lr = LogisticRegression(C=1.0, multi_class="multinomial")
# we use lbfgs to support multinomial
params = lr.get_params()
    # we store the params to pass them on to _log_reg_scoring_path below
for key in ["C", "n_jobs", "warm_start"]:
del params[key]
lr.fit(X[train], y[train])
for averaging in multiclass_agg_list:
scorer = get_scorer(scoring + averaging)
assert_array_almost_equal(
_log_reg_scoring_path(
X, y, train, test, Cs=[1.0], scoring=scorer, **params
)[2][0],
scorer(lr, X[test], y[test]),
)
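# Illustrative sketch, not part of the upstream suite: in single-label multiclass problems
# every prediction is exactly one true positive or one false positive, so micro-averaged
# precision (and recall/f1) equals accuracy -- the reason micro averaging is skipped in the
# parametrisation above. The labels below are made up.
def _demo_micro_average_equals_accuracy():
    from sklearn.metrics import accuracy_score, precision_score
    y_true = [0, 1, 2, 2, 1]
    y_pred = [0, 2, 2, 2, 0]
    # both values are 3 correct predictions out of 5, i.e. 0.6
    return accuracy_score(y_true, y_pred), precision_score(y_true, y_pred, average="micro")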
def test_multinomial_logistic_regression_string_inputs():
# Test with string labels for LogisticRegression(CV)
n_samples, n_features, n_classes = 50, 5, 3
X_ref, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_classes=n_classes,
n_informative=3,
random_state=0,
)
y_str = LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y)
# For numerical labels, let y values be taken from set (-1, 0, 1)
y = np.array(y) - 1
# Test for string labels
lr = LogisticRegression(multi_class="multinomial")
lr_cv = LogisticRegressionCV(multi_class="multinomial", Cs=3)
lr_str = LogisticRegression(multi_class="multinomial")
lr_cv_str = LogisticRegressionCV(multi_class="multinomial", Cs=3)
lr.fit(X_ref, y)
lr_cv.fit(X_ref, y)
lr_str.fit(X_ref, y_str)
lr_cv_str.fit(X_ref, y_str)
assert_array_almost_equal(lr.coef_, lr_str.coef_)
assert sorted(lr_str.classes_) == ["bar", "baz", "foo"]
assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
assert sorted(lr_str.classes_) == ["bar", "baz", "foo"]
assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"]
# The predictions should be in original labels
assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"]
assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"]
# Make sure class weights can be given with string labels
lr_cv_str = LogisticRegression(
class_weight={"bar": 1, "baz": 2, "foo": 0}, multi_class="multinomial"
).fit(X_ref, y_str)
assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"]
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5, random_state=0)
X[X < 1.0] = 0.0
csr = sparse.csr_matrix(X)
clf = LogisticRegressionCV()
clf.fit(X, y)
clfs = LogisticRegressionCV()
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert clfs.C_ == clf.C_
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e., before classes 0 and 1 are
    # conflated) are used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr")
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr")
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
    # Ensure that what OvR learns for class 2 is the same regardless of whether
    # classes 0 and 1 are separated or not
assert_allclose(clf.scores_[2], clf1.scores_[2])
assert_allclose(clf.intercept_[2:], clf1.intercept_)
assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert clf.coef_.shape == (3, n_features)
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
assert clf.Cs_.shape == (10,)
scores = np.asarray(list(clf.scores_.values()))
assert scores.shape == (3, n_cv, 10)
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ["lbfgs", "newton-cg", "sag", "saga"]:
max_iter = 500 if solver in ["sag", "saga"] else 15
clf_multi = LogisticRegressionCV(
solver=solver,
multi_class="multinomial",
max_iter=max_iter,
random_state=42,
tol=1e-3 if solver in ["sag", "saga"] else 1e-2,
cv=2,
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert multi_score > ovr_score
# Test attributes of LogisticRegressionCV
assert clf.coef_.shape == clf_multi.coef_.shape
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
assert clf_multi.Cs_.shape == (10,)
scores = np.asarray(list(clf_multi.scores_.values()))
assert scores.shape == (3, n_cv, 10)
def test_logistic_regression_solvers():
"""Test solvers converge to the same result."""
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
params = dict(fit_intercept=False, random_state=42, multi_class="ovr")
solvers = ("newton-cg", "lbfgs", "liblinear", "sag", "saga")
regressors = {
solver: LogisticRegression(solver=solver, **params).fit(X, y)
for solver in solvers
}
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_array_almost_equal(
regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3
)
def test_logistic_regression_solvers_multiclass():
"""Test solvers converge to the same result for multiclass problems."""
X, y = make_classification(
n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0
)
tol = 1e-7
params = dict(fit_intercept=False, tol=tol, random_state=42, multi_class="ovr")
solvers = ("newton-cg", "lbfgs", "liblinear", "sag", "saga")
# Override max iteration count for specific solvers to allow for
# proper convergence.
solver_max_iter = {"sag": 1000, "saga": 10000}
regressors = {
solver: LogisticRegression(
solver=solver, max_iter=solver_max_iter.get(solver, 100), **params
).fit(X, y)
for solver in solvers
}
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_array_almost_equal(
regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=4
)
def test_logistic_regressioncv_class_weights():
for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
n_classes = len(weight)
for class_weight in (weight, "balanced"):
X, y = make_classification(
n_samples=30,
n_features=3,
n_repeated=0,
n_informative=3,
n_redundant=0,
n_classes=n_classes,
random_state=0,
)
clf_lbf = LogisticRegressionCV(
solver="lbfgs",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
)
clf_ncg = LogisticRegressionCV(
solver="newton-cg",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
)
clf_lib = LogisticRegressionCV(
solver="liblinear",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
)
clf_sag = LogisticRegressionCV(
solver="sag",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
tol=1e-5,
max_iter=10000,
random_state=0,
)
clf_saga = LogisticRegressionCV(
solver="saga",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
tol=1e-5,
max_iter=10000,
random_state=0,
)
clf_lbf.fit(X, y)
clf_ncg.fit(X, y)
clf_lib.fit(X, y)
clf_sag.fit(X, y)
clf_saga.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(
n_samples=20, n_features=5, n_informative=3, n_classes=2, random_state=0
)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
kw = {"random_state": 42, "fit_intercept": False, "multi_class": "ovr"}
if LR is LogisticRegressionCV:
kw.update({"Cs": 3, "cv": 3})
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ["lbfgs", "liblinear"]:
clf_sw_none = LR(solver=solver, **kw)
clf_sw_ones = LR(solver=solver, **kw)
clf_sw_none.fit(X, y)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(**kw)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver="newton-cg", **kw)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver="sag", tol=1e-10, **kw)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver="liblinear", **kw)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
        # Test that passing class_weight={0: 1, 1: 2} is the same as
        # using uniform class weights but doubling the sample weight of
        # every instance of class 1
for solver in ["lbfgs", "liblinear"]:
clf_cw_12 = LR(solver=solver, class_weight={0: 1, 1: 2}, **kw)
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, **kw)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for the l1 penalty, and for the l2 penalty with dual=True,
    # since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
class_weight={0: 1, 1: 2},
penalty="l1",
tol=1e-5,
random_state=42,
multi_class="ovr",
)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
penalty="l1",
tol=1e-5,
random_state=42,
multi_class="ovr",
)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
class_weight={0: 1, 1: 2},
penalty="l2",
dual=True,
random_state=42,
multi_class="ovr",
)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
penalty="l2",
dual=True,
random_state=42,
multi_class="ovr",
)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes=classes, y=y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
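# Illustrative sketch, not part of the upstream suite: "balanced" class weights follow
# n_samples / (n_classes * bincount(y)), so the dictionary built by the helper above gives
# rarer classes proportionally larger weights. The labels below are made up.
def _demo_balanced_class_weight():
    y = np.array([0, 0, 0, 1])
    counts = np.bincount(y)                    # [3, 1]
    weights = len(y) / (len(counts) * counts)  # approx [0.67, 2.0]
    return dict(zip(np.unique(y), weights))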
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(
solver=solver, multi_class="multinomial", class_weight="balanced"
)
clf2 = LogisticRegression(
solver=solver, multi_class="multinomial", class_weight=class_weight_dict
)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(
solver=solver, multi_class="ovr", class_weight="balanced"
)
clf2 = LogisticRegression(
solver=solver, multi_class="ovr", class_weight=class_weight_dict
)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes,
random_state=0,
)
X = StandardScaler(with_mean=False).fit_transform(X)
    # 'lbfgs' is used as the reference solver
solver = "lbfgs"
ref_i = LogisticRegression(solver=solver, multi_class="multinomial")
ref_w = LogisticRegression(
solver=solver, multi_class="multinomial", fit_intercept=False
)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert ref_i.coef_.shape == (n_classes, n_features)
assert ref_w.coef_.shape == (n_classes, n_features)
for solver in ["sag", "saga", "newton-cg"]:
clf_i = LogisticRegression(
solver=solver,
multi_class="multinomial",
random_state=42,
max_iter=2000,
tol=1e-7,
)
clf_w = LogisticRegression(
solver=solver,
multi_class="multinomial",
random_state=42,
max_iter=2000,
tol=1e-7,
fit_intercept=False,
)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert clf_i.coef_.shape == (n_classes, n_features)
assert clf_w.coef_.shape == (n_classes, n_features)
# Compare solutions between lbfgs and the other solvers
assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-2)
assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2)
assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-2)
    # Test that the path gives almost the same results. However, since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
for solver in ["lbfgs", "newton-cg", "sag", "saga"]:
clf_path = LogisticRegressionCV(
solver=solver, max_iter=2000, tol=1e-6, multi_class="multinomial", Cs=[1.0]
)
clf_path.fit(X, y)
assert_allclose(clf_path.coef_, ref_i.coef_, rtol=2e-2)
assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=2e-2)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5, random_state=0)
clf = LogisticRegression(fit_intercept=False, solver="liblinear", multi_class="ovr")
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver="liblinear", multi_class="ovr")
clf.fit(sparse.csr_matrix(X), y)
def test_saga_sparse():
    # Test LogRegCV with solver='saga' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver="saga")
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(
intercept_scaling=i, solver="liblinear", multi_class="ovr"
)
msg = (
"Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False."
% clf.intercept_scaling
)
with pytest.raises(ValueError, match=msg):
clf.fit(X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert clf.intercept_ == 0.0
def test_logreg_l1():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0)
X_noise = rng.normal(size=(n_samples, 3))
X_constant = np.ones(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
lr_liblinear = LogisticRegression(
penalty="l1",
C=1.0,
solver="liblinear",
fit_intercept=False,
multi_class="ovr",
tol=1e-10,
)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(
penalty="l1",
C=1.0,
solver="saga",
fit_intercept=False,
multi_class="ovr",
max_iter=1000,
tol=1e-10,
)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0)
X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
X_constant = np.zeros(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
X[X < 1] = 0
X = sparse.csr_matrix(X)
lr_liblinear = LogisticRegression(
penalty="l1",
C=1.0,
solver="liblinear",
fit_intercept=False,
multi_class="ovr",
tol=1e-10,
)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(
penalty="l1",
C=1.0,
solver="saga",
fit_intercept=False,
multi_class="ovr",
max_iter=1000,
tol=1e-10,
)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
# Check that solving on the sparse and dense data yield the same results
lr_saga_dense = LogisticRegression(
penalty="l1",
C=1.0,
solver="saga",
fit_intercept=False,
multi_class="ovr",
max_iter=1000,
tol=1e-10,
)
lr_saga_dense.fit(X.toarray(), y)
assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
@pytest.mark.parametrize("random_seed", [42])
@pytest.mark.parametrize("penalty", ["l1", "l2"])
def test_logistic_regression_cv_refit(random_seed, penalty):
# Test that when refit=True, logistic regression cv with the saga solver
# converges to the same solution as logistic regression with a fixed
# regularization parameter.
# Internally the LogisticRegressionCV model uses a warm start to refit on
# the full data model with the optimal C found by CV. As the penalized
# logistic regression loss is convex, we should still recover exactly
# the same solution as long as the stopping criterion is strict enough (and
# that there are no exactly duplicated features when penalty='l1').
X, y = make_classification(n_samples=100, n_features=20, random_state=random_seed)
common_params = dict(
solver="saga",
penalty=penalty,
random_state=random_seed,
max_iter=1000,
tol=1e-12,
)
lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params)
lr_cv.fit(X, y)
lr = LogisticRegression(C=1.0, **common_params)
lr.fit(X, y)
assert_array_almost_equal(lr_cv.coef_, lr.coef_)
def test_logreg_predict_proba_multinomial():
X, y = make_classification(
n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10
)
    # Predicted probabilities using the multinomial (cross-entropy) loss should
    # give a smaller log-loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert clf_ovr_loss > clf_multi_loss
# Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert clf_wrong_loss > clf_multi_loss
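# Illustrative numpy sketch, not part of the upstream suite: for the same decision values,
# softmax normalisation (multinomial predict_proba) and per-class sigmoid followed by
# renormalisation (roughly what _predict_proba_lr does) give different probability vectors,
# which is why the two log-losses compared above differ. The scores below are made up.
def _demo_softmax_vs_ovr_normalisation():
    scores = np.array([2.0, 0.5, -1.0])
    softmax = np.exp(scores - scores.max())
    softmax /= softmax.sum()
    sigmoids = 1.0 / (1.0 + np.exp(-scores))
    ovr_like = sigmoids / sigmoids.sum()
    return softmax, ovr_like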
@pytest.mark.parametrize("max_iter", np.arange(1, 5))
@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"])
@pytest.mark.parametrize(
"solver, message",
[
(
"newton-cg",
"newton-cg failed to converge. Increase the number of iterations.",
),
(
"liblinear",
"Liblinear failed to converge, increase the number of iterations.",
),
("sag", "The max_iter was reached which means the coef_ did not converge"),
("saga", "The max_iter was reached which means the coef_ did not converge"),
("lbfgs", "lbfgs failed to converge"),
],
)
def test_max_iter(max_iter, multi_class, solver, message):
    # Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
if solver == "liblinear" and multi_class == "multinomial":
pytest.skip("'multinomial' is unavailable when solver='liblinear'")
lr = LogisticRegression(
max_iter=max_iter,
tol=1e-15,
multi_class=multi_class,
random_state=0,
solver=solver,
)
with pytest.warns(ConvergenceWarning, match=message):
lr.fit(X, y_bin)
assert lr.n_iter_[0] == max_iter
@pytest.mark.parametrize("solver", ["newton-cg", "liblinear", "sag", "saga", "lbfgs"])
def test_n_iter(solver):
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
n_classes = np.unique(y).shape[0]
assert n_classes == 3
# Also generate a binary classification sub-problem.
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
# Binary classification case
clf = LogisticRegression(tol=1e-2, C=1.0, solver=solver, random_state=42)
clf.fit(X, y_bin)
assert clf.n_iter_.shape == (1,)
clf_cv = LogisticRegressionCV(
tol=1e-2, solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42
)
clf_cv.fit(X, y_bin)
assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs)
# OvR case
clf.set_params(multi_class="ovr").fit(X, y)
assert clf.n_iter_.shape == (n_classes,)
clf_cv.set_params(multi_class="ovr").fit(X, y)
assert clf_cv.n_iter_.shape == (n_classes, n_cv_fold, n_Cs)
# multinomial case
if solver == "liblinear":
# This solver only supports one-vs-rest multiclass classification.
return
# When using the multinomial objective function, there is a single
# optimization problem to solve for all classes at once:
clf.set_params(multi_class="multinomial").fit(X, y)
assert clf.n_iter_.shape == (1,)
clf_cv.set_params(multi_class="multinomial").fit(X, y)
assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs)
@pytest.mark.parametrize("solver", ("newton-cg", "sag", "saga", "lbfgs"))
@pytest.mark.parametrize("warm_start", (True, False))
@pytest.mark.parametrize("fit_intercept", (True, False))
@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"])
def test_warm_start(solver, warm_start, fit_intercept, multi_class):
    # A 1-iteration second fit on the same data should give almost the same
    # result with warm starting, and a quite different result without it.
    # Warm starting does not work with the liblinear solver.
X, y = iris.data, iris.target
clf = LogisticRegression(
tol=1e-4,
multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42,
fit_intercept=fit_intercept,
)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = (
"Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept), str(warm_start))
)
if warm_start:
assert 2.0 > cum_diff, msg
else:
assert cum_diff > 2.0, msg
def test_saga_vs_liblinear():
iris = load_iris()
X, y = iris.data, iris.target
X = np.concatenate([X] * 3)
y = np.concatenate([y] * 3)
X_bin = X[y <= 1]
y_bin = y[y <= 1] * 2 - 1
X_sparse, y_sparse = make_classification(
n_samples=50, n_features=20, random_state=0
)
X_sparse = sparse.csr_matrix(X_sparse)
for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)):
for penalty in ["l1", "l2"]:
n_samples = X.shape[0]
# alpha=1e-3 is time consuming
for alpha in np.logspace(-1, 1, 3):
saga = LogisticRegression(
C=1.0 / (n_samples * alpha),
solver="saga",
multi_class="ovr",
max_iter=200,
fit_intercept=False,
penalty=penalty,
random_state=0,
tol=1e-24,
)
liblinear = LogisticRegression(
C=1.0 / (n_samples * alpha),
solver="liblinear",
multi_class="ovr",
max_iter=200,
fit_intercept=False,
penalty=penalty,
random_state=0,
tol=1e-24,
)
saga.fit(X, y)
liblinear.fit(X, y)
# Convergence for alpha=1e-3 is very slow
assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"])
@pytest.mark.parametrize("solver", ["newton-cg", "liblinear", "saga"])
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_dtype_match(solver, multi_class, fit_intercept):
# Test that np.float32 input data is not cast to np.float64 when possible
# and that the output is approximately the same no matter the input format.
if solver == "liblinear" and multi_class == "multinomial":
pytest.skip("liblinear does not support multinomial logistic")
out32_type = np.float64 if solver == "liblinear" else np.float32
X_32 = np.array(X).astype(np.float32)
y_32 = np.array(Y1).astype(np.float32)
    X_64 = np.array(X).astype(np.float64)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.engine import base_layer_utils
from keras.layers.rnn import gru
from keras.layers.rnn import gru_v1
from keras.layers.rnn import lstm
from keras.layers.rnn import lstm_v1
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import generic_utils
# isort: off
from tensorflow.python.training.tracking import (
util as trackable_util,
)
# Used for nested input/output/state RNN test.
NestedInput = collections.namedtuple("NestedInput", ["t1", "t2"])
NestedState = collections.namedtuple("NestedState", ["s1", "s2"])
@test_combinations.run_all_keras_modes
class RNNTest(test_combinations.TestCase):
def test_minimal_rnn_cell_non_layer(self):
class MinimalRNNCell:
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units))
)
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32),
]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_non_layer_multiple_states(self):
class MinimalRNNCell:
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units))
)
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16),
]
layer = keras.layers.RNN(cells)
self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
self.assertEqual(layer.cell.output_size, 32)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_layer(self):
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super().__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, [output]
def get_config(self):
config = {"units": self.units}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(
{"MinimalRNNCell": MinimalRNNCell}
):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(
{"MinimalRNNCell": MinimalRNNCell}
):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_minimal_rnn_cell_abstract_rnn_cell(self):
class MinimalRNNCell(keras.layers.AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super().__init__(**kwargs)
@property
def state_size(self):
return self.units
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, output
@property
def output_size(self):
return self.units
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8), MinimalRNNCell(16), MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_with_time_major(self):
batch = 10
time_step = 5
embedding_dim = 4
units = 3
# Test basic case.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(x)
layer = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)
self.assertEqual(
layer.compute_output_shape(
(time_step, None, embedding_dim)
).as_list(),
[time_step, None, units],
)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, units))
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
# Test stacking.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(x)
cell_units = [10, 8, 6]
cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, cell_units[-1])),
)
# Test masking.
x = keras.Input((time_step, embedding_dim))
time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
x
)
mask = keras.layers.Masking()(time_major)
rnn = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)(mask)
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(rnn)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
            np.zeros((batch, time_step, embedding_dim)),
            np.zeros((batch, time_step, units)),
        )
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import *
import acados_template as at
import numpy as nmp
from ctypes import *
import matplotlib
import matplotlib.pyplot as plt
import scipy.linalg
import json
CODE_GEN = 1
COMPILE = 1
FORMULATION = 2  # 0 for hexagon, 1 for sphere, 2 for SCQP sphere
i_d_ref = 1.484
i_q_ref = 1.429
w_val = 200
i_d_ref = -20
i_q_ref = 20
w_val = 300
udc = 580
u_max = 2/3*udc
# fitted psi_d map
def psi_d_num(x,y):
# This function was generated by the Symbolic Math Toolbox version 8.0.
# 07-Feb-2018 23:07:49
psi_d_expression = x*(-4.215858085639979e-3) + \
exp(y**2*(-8.413493151721978e-5))*atan(x*1.416834085282644e-1)*8.834738694115108e-1
return psi_d_expression
def psi_q_num(x,y):
# This function was generated by the Symbolic Math Toolbox version 8.0.
# 07-Feb-2018 23:07:50
psi_q_expression = y*1.04488335702649e-2+exp(x**2*(-1.0/7.2e1))*atan(y)*6.649036351062812e-2
return psi_q_expression
psi_d_ref = psi_d_num(i_d_ref, i_q_ref)
psi_q_ref = psi_q_num(i_d_ref, i_q_ref)
# compute steady-state u
Rs = 0.4
u_d_ref = Rs*i_d_ref - w_val*psi_q_ref
u_q_ref = Rs*i_q_ref + w_val*psi_d_ref
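# Steady-state dq-frame stator voltage equations (flux derivatives set to zero in the
# dynamics defined below): u_d = Rs*i_d - w*psi_q and u_q = Rs*i_q + w*psi_d, evaluated
# here at the reference currents and speed.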
def export_dae_model():
model_name = 'rsm'
# constants
theta = 0.0352
Rs = 0.4
m_load = 0.0
J = nmp.array([[0, -1], [1, 0]])
# set up states
psi_d = SX.sym('psi_d')
psi_q = SX.sym('psi_q')
x = vertcat(psi_d, psi_q)
# set up controls
u_d = SX.sym('u_d')
u_q = SX.sym('u_q')
u = vertcat(u_d, u_q)
# set up algebraic variables
i_d = SX.sym('i_d')
i_q = SX.sym('i_q')
z = vertcat(i_d, i_q)
# set up xdot
psi_d_dot = SX.sym('psi_d_dot')
psi_q_dot = SX.sym('psi_q_dot')
xdot = vertcat(psi_d_dot, psi_q_dot)
# set up parameters
w = SX.sym('w') # speed
dist_d = SX.sym('dist_d') # d disturbance
dist_q = SX.sym('dist_q') # q disturbance
p = vertcat(w, dist_d, dist_q)
# build flux expression
Psi = vertcat(psi_d_num(i_d, i_q), psi_q_num(i_d, i_q))
# dynamics
f_impl = vertcat( psi_d_dot - u_d + Rs*i_d - w*psi_q - dist_d, \
psi_q_dot - u_q + Rs*i_q + w*psi_d - dist_q, \
psi_d - Psi[0], \
psi_q - Psi[1])
model = acados_dae()
model.f_impl_expr = f_impl
model.f_expl_expr = []
model.x = x
model.xdot = xdot
model.u = u
model.z = z
model.p = p
model.name = model_name
return model
def export_voltage_sphere_con():
con_name = 'v_sphere'
# set up states
psi_d = SX.sym('psi_d')
psi_q = SX.sym('psi_q')
x = vertcat(psi_d, psi_q)
# set up controls
u_d = SX.sym('u_d')
u_q = SX.sym('u_q')
u = vertcat(u_d, u_q)
# voltage sphere
constraint = acados_constraint()
constraint.expr = u_d**2 + u_q**2
# constraint.expr = u_d + u_q
constraint.x = x
constraint.u = u
constraint.nc = 1
constraint.name = con_name
return constraint
def export_nonlinear_part_voltage_constraint():
con_name = 'v_sphere_nl'
# set up states
psi_d = SX.sym('psi_d')
psi_q = SX.sym('psi_q')
x = vertcat(psi_d, psi_q)
# set up controls
u_d = SX.sym('u_d')
u_q = SX.sym('u_q')
u = vertcat(u_d, u_q)
# voltage sphere
constraint = acados_constraint()
constraint.expr = vertcat(u_d, u_q)
# constraint.expr = u_d + u_q
constraint.x = x
constraint.u = u
constraint.nc = 2
constraint.name = con_name
return constraint
def get_general_constraints_DC(u_max):
# polytopic constraint on the input
r = u_max
x1 = r
y1 = 0
x2 = r*cos(pi/3)
y2 = r*sin(pi/3)
q1 = -(y2 - y1/x1*x2)/(1-x2/x1)
m1 = -(y1 + q1)/x1
# q1 <= uq + m1*ud <= -q1
# q1 <= uq - m1*ud <= -q1
# box constraints
m2 = 0
q2 = r*sin(pi/3)
# -q2 <= uq <= q2
# form D and C matrices
# (acados C interface works with column major format)
D = nmp.transpose(nmp.array([[1, m1],[1, -m1]]))
# D = nmp.array([[1, m1],[1, -m1]])
# TODO(andrea): ???
# D = nmp.transpose(nmp.array([[m1, 1],[-m1, 1]]))
D = nmp.array([[m1, 1],[-m1, 1]])
C = nmp.transpose(nmp.array([[0, 0], [0, 0]]))
ug = nmp.array([-q1, -q1])
lg = nmp.array([+q1, +q1])
lbu = nmp.array([-q2])
ubu = nmp.array([+q2])
res = dict()
res["D"] = D
res["C"] = C
res["lg"] = lg
res["ug"] = ug
res["lbu"] = lbu
res["ubu"] = ubu
return res
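# Hedged sanity check, not part of the original example: the hexagon vertex at angle pi/3
# should lie (up to rounding) on the boundary of the polytopic and box constraints built
# above, i.e. lg <= D*u <= ug and lbu <= u_q <= ubu. Only numpy is used here.
def _check_hexagon_vertex(u_max):
    con = get_general_constraints_DC(u_max)
    u = nmp.array([u_max*nmp.cos(nmp.pi/3), u_max*nmp.sin(nmp.pi/3)])
    Du = nmp.dot(con["D"], u)
    tol = 1e-9
    inside_polytope = nmp.all(con["lg"] - tol <= Du) and nmp.all(Du <= con["ug"] + tol)
    inside_box = con["lbu"][0] - tol <= u[1] <= con["ubu"][0] + tol
    return bool(inside_polytope and inside_box)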
# create render arguments
ra = acados_ocp_nlp()
# export model
model = export_dae_model()
# export constraint description
constraint = export_voltage_sphere_con()
constraint_nl = export_nonlinear_part_voltage_constraint()
# set model_name
ra.model_name = model.name
if FORMULATION == 1:
# constraints name
ra.con_h_name = constraint.name
if FORMULATION == 2:
# constraints name
ra.con_h_name = constraint.name
ra.con_p_name = constraint_nl.name
# Ts = 0.0016
# Ts = 0.0012
Ts = 0.0008
# Ts = 0.0004
nx = model.x.size()[0]
nu = model.u.size()[0]
nz = model.z.size()[0]
np = model.p.size()[0]
ny = nu + nx
ny_e = nx
N = 2
Tf = N*Ts
# set ocp_nlp_dimensions
nlp_dims = ra.dims
nlp_dims.nx = nx
nlp_dims.nz = nz
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nbu = 1
if FORMULATION == 0:
nlp_dims.nbu = 1
nlp_dims.ng = 2
if FORMULATION == 1:
nlp_dims.ng = 0
nlp_dims.nh = 1
if FORMULATION == 2:
nlp_dims.ng = 2
nlp_dims.npd = 2
nlp_dims.nh = 1
nlp_dims.nh_e = 0
# nlp_dims.nbu = 2
# nlp_dims.ng = 2
# nlp_dims.ng = 0
nlp_dims.ng_e = 0
nlp_dims.nbx_e = 0
nlp_dims.nu = nu
nlp_dims.np = np
nlp_dims.N = N
# nlp_dims.npd_e = -1
# nlp_dims.nh = 1
# set weighting matrices
nlp_cost = ra.cost
Q = nmp.eye(nx)
Q[0,0] = 5e2*Tf/N
Q[1,1] = 5e2*Tf/N
R = nmp.eye(nu)
R[0,0] = 1e-4*Tf/N
R[1,1] = 1e-4*Tf/N
# R[0,0] = 1e1
# R[1,1] = 1e1
nlp_cost.W = scipy.linalg.block_diag(Q, R)
Vx = nmp.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
nlp_cost.Vx = Vx
Vu = nmp.zeros((ny, nu))
Vu[2,0] = 1.0
Vu[3,1] = 1.0
nlp_cost.Vu = Vu
Vz = nmp.zeros((ny, nz))
Vz[0,0] = 0.0
Vz[1,1] = 0.0
nlp_cost.Vz = Vz
Q_e = nmp.eye(nx)
#!python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 11 14:33:54 2017
@author: lansford
"""
from __future__ import division
import os
from pdos_overlap.coordination import get_geometric_data
import numpy as np
import matplotlib.pyplot as plt
from pdos_overlap.vasp_dos import VASP_DOS
from pdos_overlap.vasp_dos import get_all_VASP_files
from pdos_overlap.plotting_tools import set_figure_settings
from scipy.stats import linregress
set_figure_settings('paper')
Downloads_folder = os.path.join(os.path.expanduser("~"),'Downloads')
GCNList = []
atom_type = []
band_list = []
band_width_list = []
occupied_band_list = []
unoccupied_band_list = []
filling_list = []
second_moment_list = []
bond_energy_list = []
DOSCAR_files, CONTCAR_files = get_all_VASP_files(\
r'C:\Users\lansf\Documents\Data\PROBE_PDOS\lobster_files_(N+1)bands\nanoparticles_noW')
for DOSCAR, CONTCAR in zip(DOSCAR_files, CONTCAR_files):
indices, GCNs, atom_types = get_geometric_data(CONTCAR)
GCNList += GCNs.tolist()
atom_type += atom_types.tolist()
# read and return densityofstates object
PDOS = VASP_DOS(DOSCAR)
for atom_index in indices:
band_center = PDOS.get_band_center(atom_index, ['s','d']\
, sum_density=True) - PDOS.e_fermi
occupied_band_center = PDOS.get_band_center(atom_index, ['s','d']\
, sum_density=True, max_energy=PDOS.e_fermi) - PDOS.e_fermi
unoccupied_band_center = PDOS.get_band_center(atom_index, ['s','d']\
, sum_density=True, min_energy=PDOS.e_fermi) - PDOS.e_fermi
band_width = PDOS.get_center_width(PDOS.e_fermi, atom_index, ['s','d']\
, sum_density=True)
second_moment = PDOS.get_second_moment(atom_index, ['s','d']\
, sum_density=True)
bond_energy = PDOS.get_bond_energy(atom_index, ['s','d']\
, sum_density=True)
filling = PDOS.get_filling(atom_index, ['s','d']\
, sum_density=True, max_energy=PDOS.e_fermi)
band_list.append(band_center)
band_width_list.append(band_width)
occupied_band_list.append(occupied_band_center)
unoccupied_band_list.append(unoccupied_band_center)
filling_list.append(filling)
second_moment_list.append(second_moment)
bond_energy_list.append(bond_energy)
GCNList = np.array(GCNList)
atom_type = np.array(atom_type)
band_list = np.array(band_list).T
band_width_list = np.array(band_width_list).T
occupied_band_list = np.array(occupied_band_list).T
unoccupied_band_list = np.array(unoccupied_band_list).T
filling_list = np.array(filling_list).T
second_moment_list = np.array(second_moment_list).T
bond_energy_list = np.array(bond_energy_list).T
#plotting scaling of filling with GCN for surface sites
colors = ['b', 'r']
plt.figure(figsize=(3.5,3.2))
Efit = []
for count, color in enumerate(colors):
Efit.append(np.polyfit(GCNList[atom_type=='surface']\
,filling_list[count][atom_type=='surface'], 1))
plt.plot(np.sort(GCNList[atom_type=='surface'])\
, np.poly1d(Efit[count])\
(np.sort(GCNList[atom_type=='surface'])), color + '--')
for count, color in enumerate(colors):
plt.plot(GCNList[atom_type=='surface'], filling_list[count][atom_type=='surface'], color + 'o')
plt.legend([r'${filling}_{s}$=%.2fGCN + %.2f states' %(Efit[0][0],Efit[0][1])
,r'${filling}_{d}$=%.2fGCN + %.2f states' %(Efit[1][0],Efit[1][1])]
,loc='best',frameon=False)
plt.xlabel('Generalized coordination number (GCN)')
plt.ylabel('Filling [states]')
plt.show()
#plotting scaling of band center with GCN
fig = plt.figure(figsize=(7.2,5),dpi=400)
axes = fig.subplots(nrows=2, ncols=2)
#plotting function
Efit = []
for count, color in enumerate(colors):
slope, intercept, r_value, p_value, std_err = linregress(GCNList, band_list[count])
Efit.append([slope, intercept])
print('band center R^2 value and std_err')
print(r_value**2)
print(std_err)
    axes[0,0].plot(np.sort(GCNList), np.poly1d(Efit[count])(np.sort(GCNList)), color + '--')
"""
Experiment utilities.
"""
from common.log import log, LogLevel
import math
import models
import numpy
import torch
def training_arguments(parser):
"""
Default training arguments.
:param parser: argument parser
:type parser: argparse.ArgumentParser
"""
parser.add_argument('-n', '--normalization', type=str, dest='normalization', default='')
parser.add_argument('-a', '--activation', type=str, dest='activation', default='relu')
parser.add_argument('--whiten', action='store_true', default=False)
parser.add_argument('--dropout', action='store_true', default=False)
parser.add_argument('--init_scale', default=1, type=float)
parser.add_argument('--scale', default=1, type=float)
parser.add_argument('--rescale', action='store_true', default=False)
parser.add_argument('--channels', default=32, type=int)
parser.add_argument('--clipping', default=None, type=float)
def training_argument_list(args):
"""
Get default training parameters.
:param args: arguments
:type args: [str]
:return: arguments
:rtype: [str]
"""
training_args = [
'-n=%s' % str(args.normalization),
'-a=%s' % str(args.activation),
'--init_scale=%s' % str(args.init_scale),
'--channels=%s' % str(args.channels),
'--scale=%s' % str(args.scale),
]
if args.clipping is not None:
training_args += ['--clipping=%s' % str(args.clipping)]
if args.whiten:
training_args += ['--whiten']
if args.dropout:
training_args += ['--dropout']
return training_args
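# Hedged usage sketch, not part of the original module: training_arguments() registers the
# default options on a parser and training_argument_list() turns the parsed namespace back
# into a CLI-style argument list. The --architecture option is an assumption here (it is
# read later by get_training_directory but registered by the calling script, not here).
def _demo_training_arguments_roundtrip():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--architecture', type=str, default='resnet')
    training_arguments(parser)
    args = parser.parse_args([])
    return training_argument_list(args)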
def get_training_directory(training_config, args, suffix=''):
"""
Get training directory based on training arguments.
:param training_config: training configuration
:type training_config: common.experiments.config.NormalTrainingConfig
:param args: arguments
:param suffix: suffix to use for directory
:type suffix: str
:return: directory name
:rtype: str
"""
init_scale = args.init_scale
scale = args.scale
clipping = args.clipping
channels = args.channels
whiten = args.whiten
dropout = args.dropout
architecture = args.architecture
normalization = args.normalization
if normalization == '':
log('[Warning] no normalization', LogLevel.WARNING)
activation = args.activation
if activation == '':
log('[Warning] no activation', LogLevel.WARNING)
    # just allows calling various scripts sequentially without having to reset the original directory
if getattr(training_config, 'original_directory', None) is None:
training_config.original_directory = training_config.directory
directory = training_config.original_directory
if suffix != '':
directory += '_' + suffix
directory += '_' + architecture
if normalization != '':
directory += '_' + normalization
if activation != 'relu':
directory += '_' + activation
if whiten:
directory += '_whiten'
if dropout:
directory += '_dropout'
if scale != 1:
directory += ('_scale%g' % scale).replace('.', '')
if clipping is not None:
directory += ('_clipping%g' % clipping).replace('.', '')
if not math.isclose(init_scale, 1.):
directory += ('_%g' % init_scale).replace('.', '')
directory += '_%d' % channels
return directory
def get_get_model(args, config):
"""
Get a function to return and initialize the model.
:param args: arguments
:param config: training configuration
:return: callable to get model
"""
channels = args.channels
whiten = args.whiten
dropout = args.dropout
init_scale = args.init_scale
scale = args.scale
clipping = args.clipping
architecture = args.architecture
normalization = args.normalization
activation = args.activation
def set_whiten(model, resolution):
mean = numpy.zeros(resolution[0])
std = numpy.zeros(resolution[0])
for c in range(resolution[0]):
mean[c] = numpy.mean(config.trainset.images[:, :, :, c])
            std[c] = numpy.std(config.trainset.images[:, :, :, c])
import os
import numpy as np
from random import shuffle
from collections import namedtuple
from glob import glob
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tf2_module import build_generator, build_discriminator_classifier, softmax_criterion
from tf2_utils import get_now_datetime, save_midis
class Classifier(object):
def __init__(self, args):
self.dataset_A_dir = args.dataset_A_dir
self.dataset_B_dir = args.dataset_B_dir
self.sample_dir = args.sample_dir
self.batch_size = args.batch_size
self.time_step = args.time_step
self.pitch_range = args.pitch_range
self.input_c_dim = args.input_nc # number of input image channels
self.sigma_c = args.sigma_c
self.sigma_d = args.sigma_d
self.lr = args.lr
self.model = args.model
self.generator = build_generator
self.discriminator = build_discriminator_classifier
OPTIONS = namedtuple('OPTIONS', 'batch_size '
'time_step '
'input_nc '
'output_nc '
'pitch_range '
'gf_dim '
'df_dim '
'is_training')
self.options = OPTIONS._make((args.batch_size,
args.time_step,
args.input_nc,
args.output_nc,
args.pitch_range,
args.ngf,
args.ndf,
args.phase == 'train'))
self.now_datetime = get_now_datetime()
self._build_model(args)
print("Initializing classifier...")
def _build_model(self, args):
# build classifier
self.classifier = self.discriminator(self.options,
name='Classifier')
# optimizer
self.classifier_optimizer = Adam(self.lr,
beta_1=args.beta1)
# checkpoints
model_name = "classifier.model"
model_dir = "classifier_{}2{}_{}_{}".format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
str(self.sigma_c))
self.checkpoint_dir = os.path.join(args.checkpoint_dir,
model_dir,
model_name)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.checkpoint = tf.train.Checkpoint(classifier_optimizer=self.classifier_optimizer,
classifier=self.classifier)
self.checkpoint_manager = tf.train.CheckpointManager(self.checkpoint,
self.checkpoint_dir,
max_to_keep=5)
def train(self, args):
# create training list (origin data with corresponding label)
# Label for A is (1, 0), for B is (0, 1)
dataA = glob('./datasets/{}/train/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/train/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
training_list = [pair for pair in zip(data_origin, label_origin)]
print('Successfully create training list!')
# create test list (origin data with corresponding label)
dataA = glob('./datasets/{}/test/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/test/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
testing_list = [pair for pair in zip(data_origin, label_origin)]
print('Successfully create testing list!')
data_test = [np.load(pair[0]) * 2. - 1. for pair in testing_list]
data_test = np.array(data_test).astype(np.float32)
gaussian_noise = np.random.normal(0,
self.sigma_c,
[data_test.shape[0],
data_test.shape[1],
data_test.shape[2],
data_test.shape[3]])
data_test += gaussian_noise
label_test = [pair[1] for pair in testing_list]
        label_test = np.array(label_test)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 14:19:05 2020
Functions to attach the LCFS to carbon emissions, based on code by <NAME>
@author: lenakilian
"""
import numpy as np
import pandas as pd
import io_functions as io
df = pd.DataFrame
import demand_functions as dm
import copy as cp
def convert36to33(Y,concs_dict2,years):
Y2 = {}
for yr in years:
temp = np.dot(Y[yr],concs_dict2['C43_to_C40'])
Y2[yr] = df(temp, index = Y[yr].index, columns = concs_dict2['C43_to_C40'].columns)
return Y2
"""
def make_totals(hhspenddata, years, concs_dict2, total_Yhh_112):
coicop_exp_tot = {}
for yr in years:
coicop_exp_tot[yr] = np.sum(hhspenddata[yr].loc[:,'1.1.1.1':'192.168.127.12'],0)
coicop_exp_tot2 = {}
for yr in years:
corrector = np.zeros(shape = 307)
countstart = 0
countend = 0
for numb in range(0,33):
conc = concs_dict2[str(numb)+'a']
countend = np.sum(np.sum(conc))+countstart
lcf_subtotal = np.sum(np.dot(coicop_exp_tot[yr],conc)) #*52/1000)
required_subtotal = np.sum(total_Yhh_112[yr].iloc[:,numb])
correction_factor = required_subtotal/lcf_subtotal
for c in range(countstart,countend):
corrector[c] = correction_factor
countstart = countend
coicop_exp_tot2[yr] = np.dot(coicop_exp_tot[yr],np.diag(corrector))
return coicop_exp_tot2
"""
def expected_totals(hhspenddata, years, concs_dict2, total_Yhh_106):
coicop_exp_tot = {}
for year in years:
temp = np.sum(hhspenddata[year], 0)
corrector = np.zeros(shape = 307)
start = 0
end = 0
corrector = []
for i in range(0, 33):
conc = concs_dict2[str(i) + 'a']
end = len(conc.columns) + start
lcf_subtotal = np.sum(np.dot(temp, conc))
required_subtotal = np.sum(total_Yhh_106[year].iloc[:, i])
corrector += [required_subtotal/lcf_subtotal for i in range(start, end)]
start = end
coicop_exp_tot[year] = np.dot(temp, np.diag(corrector))
return(coicop_exp_tot)
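# Hedged toy sketch, not part of the original pipeline: expected_totals rescales survey
# spending in each aggregate category by a per-category correction factor so that the
# category total matches the corresponding supply-use table total. All numbers below are
# made up purely for illustration.
def _demo_subtotal_scaling():
    survey_spend = np.array([[40.0, 25.0],
                             [40.0, 25.0]])        # households x categories
    survey_subtotal = survey_spend.sum(0)          # [80, 50]
    required_subtotal = np.array([100.0, 40.0])    # national accounts totals
    corrector = required_subtotal / survey_subtotal
    return np.dot(survey_spend, np.diag(corrector))  # column sums now match required_subtotal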
def make_y_hh_307(Y,coicop_exp_tot,years,concs_dict2,meta):
yhh_wide = {}
for yr in years:
temp = np.zeros(shape = [meta['fd']['len_idx'],307])
countstart = 0
countend = 0
col = []
for a in range(0,33):
conc = np.tile(concs_dict2[str(a)],(meta['reg']['len'],1))
countend = np.sum(np.sum(concs_dict2[str(a)+'a']))+countstart
category_total = np.dot(coicop_exp_tot[yr],concs_dict2[str(a)+'a'])
#test1 = np.dot(np.diag(Y[yr].iloc[:,a]),conc)
test1 = np.dot(conc,np.diag(category_total))
#test2 = np.tile(np.dot(Y[yr].iloc[:,a],conc),(1590,1))
test2 = np.transpose(np.tile(np.dot(conc,category_total),(np.size(conc,1),1)))
test3 = test1/test2
test3 = np.nan_to_num(test3, copy=True)
#num = np.dot(conc,np.diag(category_total))
#test4 = np.multiply(num,test3)
test4 = np.dot(np.diag(Y[yr].iloc[:,a]),test3)
#den = np.dot(np.diag(np.sum(num,1)),concs_dict2[str(a)])
#prop = np.divide(num,den)
#prop = np.nan_to_num(prop, copy=True)
#temp[:,countstart:countend] = (np.dot(np.diag(total_Yhh_106[yr].iloc[:,a]),prop))
temp[:,countstart:countend] = test4
col[countstart:countend] = concs_dict2[str(a) + 'a'].columns
countstart = countend
yhh_wide[yr] = df(temp, columns = col)
return yhh_wide
def make_y_hh_prop(Y,total_Yhh_106,meta,years):
yhh_prop = {}
for yr in years:
temp = np.zeros(shape=(len(Y[yr])))
for r in range(0,meta['reg']['len']):
temp[r*106:(r+1)*106] = np.divide(np.sum(Y[yr].iloc[r*106:(r+1)*106,0:36],1),np.sum(total_Yhh_106[yr],1))
np.nan_to_num(temp, copy = False)
yhh_prop[yr] = temp
return yhh_prop
def make_new_Y(Y,yhh_wide,meta,years):
newY = {}
col = []
for yr in years:
temp = np.zeros(shape=[len(Y[yr]),314])
temp[:,0:307] = yhh_wide[yr]
temp[:,307:314] = Y[yr].iloc[:,33:40]
col[0:307] = yhh_wide[yr].columns
col[307:314] = Y[yr].iloc[:,33:40].columns
newY[yr] = df(temp, index = Y[yr].index, columns = col)
return newY
def make_ylcf_props(hhspenddata,years):
ylcf_props = {}
for yr in years:
totalspend = np.sum(hhspenddata[yr].loc[:,'1.1.1.1':'192.168.127.12'])
temp = np.divide(hhspenddata[yr].loc[:,'1.1.1.1':'192.168.127.12'],np.tile(totalspend,[len(hhspenddata[yr]),1]))
np.nan_to_num(temp, copy = False)
ylcf_props[yr] = df(temp, index = hhspenddata[yr].index)
return ylcf_props
def makefoot(S,U,Y,stressor,years):
footbyCOICOP = {}
for yr in years:
temp = np.zeros(shape = 307)
Z = io.make_Z_from_S_U(S[yr],U[yr])
bigY = np.zeros(shape = [np.size(Y[yr],0)*2,np.size(Y[yr],1)])
bigY[np.size(Y[yr],0):np.size(Y[yr],0)*2,0:] = Y[yr]
x = io.make_x(Z,bigY)
L = io.make_L(Z,x)
bigstressor = np.zeros(shape = [np.size(Y[yr],0)*2,1])
bigstressor[0:np.size(Y[yr],0),:] = stressor[yr]
e = np.sum(bigstressor,1)/x
eL = np.dot(e,L)
for a in range(0,307):
temp[a] = np.dot(eL,bigY[:,a])
footbyCOICOP[yr] = temp
return footbyCOICOP
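# A sketch of the environmentally extended input-output chain used in makefoot above:
#   x    = gross output required to meet final demand (built from Z and bigY)
#   e    = stressor / x        -> emission intensity per unit of output
#   F_a  = e . L . Y[:, a]     -> footprint attributed to COICOP category a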
def makelcffootdata(domfootbyLCF_nrg,impfootbyLCF_nrg,hhspenddata,years):
data = {}
for yr in years:
temp = np.zeros(shape = [len(domfootbyLCF_nrg[yr]),307+307+17])
cols = ['index','weight','age','sex','ethnicity','number of people','household type','dwelling type','tenure type','GOR','OAC','income','income tax','Soc-ec','no of rooms','cum sum','income rank',]
temp[:,0] = hhspenddata[yr].index
temp[:,1:10] = hhspenddata[yr].loc[:,'weight':'GOR modified']
for a in range(0,len(hhspenddata[yr])):
if hhspenddata[yr].iloc[a,9] == ' ':
temp[a,10] = 0
else:
temp[a,10] = int(hhspenddata[yr].iloc[a,9])
temp[:,11:15] = hhspenddata[yr].loc[:,'Income anonymised':'rooms in accommodation']
cols[17:324] = hhspenddata[yr].loc[:,'1.1.1.1':'192.168.127.12'].columns
cols[341:631] = hhspenddata[yr].loc[:,'1.1.1.1':'192.168.127.12'].columns
temp[:,17:17+307] = domfootbyLCF_nrg[yr][:,0:307]
temp[:,17+307:17+307+307] = impfootbyLCF_nrg[yr][:,0:307]
temp2 = df(temp,columns = cols)
temp3 = temp2.sort_values('income')
weightsum = np.sum(temp3.loc[:,'weight'])
weightsum20 = weightsum/20
temp3.iloc[0,15] = temp3.iloc[0,1]
for a in range(1,len(domfootbyLCF_nrg[yr])):
temp3.iloc[a,15] = temp3.iloc[a-1,15] + temp3.iloc[a,1]
a=0
for b in range(0,len(domfootbyLCF_nrg[yr])):
if temp3.iloc[b,15]<(a+1)*weightsum20:
temp3.iloc[b,16] = a
else:
a=a+1
temp3.iloc[b,16] = a-1
data[yr] = temp3
return data
def lcfs_analysis(exp_data, concs_dict, concs_dict2, og_Y, S, U, meta, ghg, uk_ghg_direct):
Y = cp.copy(og_Y)
hhdspend_uk = {}
for year in list(exp_data.keys()):
temp = exp_data[year].loc[:,'1.1.1.1':'192.168.127.12']
hhdspend_uk[year] = temp.apply(lambda x: x * exp_data[year]['pop'])
hhdspend_uk[year].index = exp_data[year].index
# use concs
temp = | np.dot(Y[year], concs_dict2['C43_to_C40']) | numpy.dot |
import colorsys
import numpy as np
def float_to_uint8(float_color):
"""
Converts a float color (0 to 1.0) to uint8 (0 to 255)
"""
return tuple(map(lambda x: int(255.0 * x), float_color))
def uint8_to_float(uint8_color):
"""
Converts a uint8 color (0 to 255) to float (0 to 1.0)
"""
return tuple(map(lambda x: float(x) / 255.0, uint8_color))
def rgb_uint8_to_hsv_float(rgb_color):
return colorsys.rgb_to_hsv(*uint8_to_float(rgb_color))
def hsv_float_to_rgb_uint8(hsv_color):
return float_to_uint8(colorsys.hsv_to_rgb(*hsv_color))
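# Minimal usage sketch (not part of the original module): round-trip an arbitrary
# colour through the float/uint8 and RGB/HSV helpers above.
def _example_colour_roundtrip():
    rgb = (255, 128, 0)                 # uint8 colour
    hsv = rgb_uint8_to_hsv_float(rgb)   # floats in [0, 1]
    return hsv_float_to_rgb_uint8(hsv)  # back to approximately (255, 128, 0)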
def clip(low, input, high):
return min(max(input, low), high)
def blend_to_buffer(source, destination, progress, mode):
h1,l1,s1 = source.T
h2,l2,s2 = destination.T
if mode == 'overwrite':
not_dark = l1 > 0.0
destination[not_dark] = source[not_dark]
else:
raise NotImplementedError
return destination
def hls_blend(start, end, output_buffer, progress, mode, fade_length=1.0, ease_power=0.5):
p = abs(progress)
startPower = (1.0 - p) / fade_length
startPower = clip(0.0, startPower, 1.0)
startPower = pow(startPower, ease_power)
endPower = p / fade_length
endPower = clip(0.0, endPower, 1.0)
endPower = pow(endPower, ease_power)
h1,l1,s1 = start.T
h2,l2,s2 = end.T
np.clip(l1,0,1,l1)
np.clip(l2,0,1,l2)
np.clip(s1,0,1,s1)
np.clip(s2,0,1,s2)
startWeight = (1.0 - 2 * np.abs(0.5 - l1)) * s1
endWeight = (1.0 - 2 * np.abs(0.5 - l2)) * s2
s = (s1 * startPower + s2 * endPower)
x1 = np.cos(2 * np.pi * h1) * startPower * startWeight
x2 = np.cos(2 * np.pi * h2) * endPower * endWeight
y1 = np.sin(2 * np.pi * h1) * startPower * startWeight
y2 = np.sin(2 * np.pi * h2) * endPower * endWeight
x = x1 + x2
y = y1 + y2
if progress >= 0:
l = np.maximum(l1 * startPower, l2 * endPower)
opposition = np.sqrt(np.square((x1-x2)/2) + np.square((y1-y2)/2))
if mode == 'multiply':
l -= opposition
elif mode == 'add':
l = np.maximum(l, opposition, l)
else: # hacky support for old blend
l = np.sqrt(np.square(x) + np.square(y)) / 2
h = np.arctan2(y, x) / (2*np.pi)
    nocolor = (x * y == 0)
    # zero out hue and saturation where there is no colour information
    h = np.where(nocolor, 0, h)
    s = np.where(nocolor, 0, s)
np.clip(l, 0, 1, l)
if output_buffer is not None:
frame = output_buffer
frame[:, 0] = h
frame[:, 1] = l
frame[:, 2] = s
else:
frame = np.asarray([h, l, s]).T
return frame
def rgb_to_hls(arr):
""" fast rgb_to_hls using numpy array """
# adapted from <NAME>
# http://www.mail-archive.com/<EMAIL>/msg06147.html
arr = arr.astype("float32") / 255.0
out = np.empty_like(arr)
arr_max = arr.max(-1)
delta = arr.ptp(-1)
arr_min = arr.min(-1)
total = arr_max + arr_min
l = total / 2.0
s = delta / total
idx = (l > 0.5)
s[idx] = delta[idx] / (2.0 - total[idx])
# red is max
idx = (arr[:,:,0] == arr_max)
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[:,:,1] == arr_max)
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0] ) / delta[idx]
# blue is max
idx = (arr[:,:,2] == arr_max)
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1] ) / delta[idx]
out[:,:,0] = (out[:,:,0]/6.0) % 1.0
out[:,:,1] = l
out[:,:,2] = s
idx = (delta==0)
out[idx, 2] = 0.0
out[idx, 0] = 0.0
# remove NaN
out[np.isnan(out)] = 0
return out
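# Minimal usage sketch (not part of the original module): a single pure-red pixel
# maps to hue 0.0, lightness 0.5 and saturation 1.0.
def _example_rgb_to_hls():
    rgb = np.array([[[255, 0, 0]]], dtype=np.uint8)  # shape (1, 1, 3)
    return rgb_to_hls(rgb)                           # -> [[[0.0, 0.5, 1.0]]]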
def hls_to_rgb(hls):
"""
Converts HLS color array [[H,L,S]] to RGB array.
http://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL
Returns [[R,G,B]] in [0..1]
Adapted from: http://stackoverflow.com/questions/4890373/detecting-thresholds-in-hsv-color-space-from-rgb-using-python-pil/4890878#4890878
"""
H = hls[:, 0]
L = hls[:, 1]
S = hls[:, 2]
C = (1 - np.absolute(2 * L - 1)) * S
Hp = H * 6.0
i = Hp.astype(np.int)
#f = Hp - i # |H' mod 2| ?
X = C * (1 - np.absolute( | np.mod(Hp, 2) | numpy.mod |
"""Additional functions for stitching images together."""
import numpy as np
import dask.array as da
from dask.delayed import delayed
from dask.distributed import Client, LocalCluster
import matplotlib.pyplot as plt
import matplotlib as mpl
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import dask
import numba
import scipy.ndimage as ndi
from sklearn.neighbors import NearestNeighbors
import zarr
from scipy.optimize import bisect, minimize
import scipy.sparse as ssp
from .registration import *
def qhist(data, quality, binbins=20, cmap='viridis', ax=None, bins=20):
"""A helper function to plot a histogram of `data` with the spread of `quality`
over the bins indicated by color.
Parameters
----------
data : array_like
quality : array_like
Must be same shape as or broadcastable to shape of `data`
    binbins : int, default=20
Number of different colors to show.
cmap : str, default='viridis'
Colormap to use. Must be a string
recognized by mpl.cm.get_cmap.
ax : None, or mpl.axes.Axes
Optionally specify a pre-existing mpl ax to plot on.
bins : int, default=20
Number of horizontal bins for the histogram of `data`.
"""
ds = 1 / binbins
binbin = | np.arange(0, 1, ds) | numpy.arange |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import keras.backend as k
import numpy as np
from art.attacks.evasion.hop_skip_jump import HopSkipJump
from art.estimators.estimator import BaseEstimator
from art.estimators.classification import ClassifierMixin
from art.estimators.classification.keras import KerasClassifier
from art.utils import random_targets
from tests.utils import TestBase
from tests.utils import get_image_classifier_tf, get_image_classifier_kr, get_image_classifier_pt
from tests.utils import get_tabular_classifier_tf, get_tabular_classifier_kr
from tests.utils import get_tabular_classifier_pt, master_seed
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
class TestHopSkipJump(TestBase):
"""
A unittest class for testing the HopSkipJump attack.
"""
@classmethod
def setUpClass(cls):
master_seed(seed=1234, set_tensorflow=True, set_torch=True)
super().setUpClass()
cls.n_train = 100
cls.n_test = 10
cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]
cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]
cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]
cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]
def setUp(self):
master_seed(seed=1234, set_tensorflow=True, set_torch=True)
super().setUp()
def test_3_tensorflow_mnist(self):
"""
First test with the TensorFlowClassifier.
:return:
"""
x_test_original = self.x_test_mnist.copy()
# Build TensorFlowClassifier
tfc, sess = get_image_classifier_tf()
# First targeted attack and norm=2
hsj = HopSkipJump(classifier=tfc, targeted=True, max_iter=20, max_eval=100, init_eval=10, verbose=False)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = hsj.generate(self.x_test_mnist, **params)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
self.assertTrue((target == y_pred_adv).any())
# Test the masking 1
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))
mask = mask.reshape(self.x_test_mnist.shape)
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# Test the masking 2
mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))
mask = mask.reshape(self.x_test_mnist.shape[1:])
params.update(mask=mask)
x_test_adv = hsj.generate(self.x_test_mnist, **params)
mask_diff = (1 - mask) * (x_test_adv - self.x_test_mnist)
self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)
unmask_diff = mask * (x_test_adv - self.x_test_mnist)
self.assertGreater(float(np.sum(np.abs(unmask_diff))), 0.0)
# First targeted attack and norm=np.inf
hsj = HopSkipJump(
classifier=tfc, targeted=True, max_iter=20, max_eval=100, init_eval=10, norm=np.Inf, verbose=False
)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = hsj.generate(self.x_test_mnist, **params)
self.assertFalse((self.x_test_mnist == x_test_adv).all())
self.assertTrue((x_test_adv <= 1.0001).all())
self.assertTrue((x_test_adv >= -0.0001).all())
target = | np.argmax(params["y"], axis=1) | numpy.argmax |
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = | np.random.randint(0, 100, [shape.rows, shape.cols]) | numpy.random.randint |
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by <NAME>, <NAME>
# --------------------------------------------------------
from __future__ import print_function, division
import numpy as np
import os, sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(cur_path, ".."))
from lib.pair_matching.RT_transform import *
from lib.utils.mkdir_if_missing import mkdir_if_missing
import random
from lib.render_glumpy.render_py_light_modelnet import Render_Py_Light_ModelNet
import cv2
classes = [
"airplane",
"bed",
"bench",
"bookshelf",
"car",
"chair",
"guitar",
"laptop",
"mantel", #'dresser',
"piano",
"range_hood",
"sink",
"stairs",
"stool",
"tent",
"toilet",
"tv_stand",
"door",
"glass_box",
"wardrobe",
"plant",
"xbox",
"bathtub",
"table",
"monitor",
"sofa",
"night_stand",
]
# print(classes)
# config for renderer
width = 640
height = 480
K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]]) # LM
ZNEAR = 0.25
ZFAR = 6.0
depth_factor = 1000
modelnet_root = os.path.join(cur_path, "../data/ModelNet")
modelnet40_root = os.path.join(modelnet_root, "ModelNet40")
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="renderer")
parser.add_argument("--model_path", required=True, help="model path")
parser.add_argument("--texture_path", required=True, help="texture path")
parser.add_argument("--seed", required=True, type=int, help="seed")
args = parser.parse_args()
return args
args = parse_args()
model_path = args.model_path
texture_path = args.texture_path
random.seed(args.seed)
np.random.seed(args.seed)
def angle_axis_to_quat(angle, rot_axis):
angle = angle % (2 * np.pi)
# print(angle)
q = np.zeros(4)
q[0] = np.cos(0.5 * angle)
q[1:] = np.sin(0.5 * angle) * rot_axis
if q[0] < 0:
q *= -1
# print('norm of q: ', LA.norm(q))
q = q / np.linalg.norm(q)
# print('norm of q: ', LA.norm(q))
return q
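# Illustrative sketch (not part of the original script): a 90 degree rotation about
# the z-axis gives the quaternion [cos(45 deg), 0, 0, sin(45 deg)].
def _example_quat_about_z():
    return angle_axis_to_quat(np.pi / 2, np.array([0.0, 0.0, 1.0]))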
def angle(u, v):
c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)) # -> cosine of the angle
rad = np.arccos(np.clip(c, -1, 1))
deg = rad / np.pi * 180
return deg
# init render machines
brightness_ratios = [0.7] ###################
render_machine = Render_Py_Light_ModelNet(
model_path, texture_path, K, width, height, ZNEAR, ZFAR, brightness_ratios
)
def gen_real():
data_dir = os.path.join(modelnet_root, "modelnet_render_v1/data/real")
mkdir_if_missing(data_dir)
real_set_dir = os.path.join(modelnet_root, "modelnet_render_v1/image_set/real")
mkdir_if_missing(real_set_dir)
# -----------------
model_folder_string_list = model_path.split("/")
cls_name = model_folder_string_list[-3]
cls_idx = classes.index(cls_name) + 1
set = model_folder_string_list[-2]
data_dir = os.path.join(data_dir, cls_name, set)
mkdir_if_missing(data_dir)
model_prefix = model_folder_string_list[-1].split(".")[0]
pz_up_dict = {
"car": np.array([0, -1, 0]),
"airplane": | np.array([0, -1, 0]) | numpy.array |
#NUMBA ############################################
import numba as nb
import numpy as np
import math as m
@nb.njit
def dummy():
return None
@nb.njit(cache=True,error_model='numpy')
def fit_multiplicative_offset_jitter(x0,f,y,dy):
off=x0[0]
jit=x0[1]
newerr=np.sqrt((dy)**2+jit**2)/off
lnL=-0.5*np.sum(((y/off-f)/(newerr))**2.0+np.log(2.0*np.pi)+np.log(newerr**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_only_multiplicative_offset(x0,f,y,dy):
off=x0
lnL=-0.5*np.sum(((y/off-f)/(dy/off))**2.0+np.log(2.0*np.pi)+np.log((dy/off)**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_linear_offset_jitter(x0,f,y,dy):
off=x0[0]
jit=x0[1]
lnL=-0.5*np.sum(((y-off-f)/(np.sqrt(dy**2+jit**2)))**2.0+np.log(2.0*np.pi)+np.log(dy**2+jit**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_only_linear_offset(x0,f,y,dy):
off=x0
lnL=-0.5*np.sum(((y-off-f)/(dy))**2.0+np.log(2.0*np.pi)+np.log(dy**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_only_jitter(x0,f,y,dy):
jit=x0
lnL=-0.5*np.sum(((y-f)/(np.sqrt(dy**2+jit**2)))**2.0+np.log(2.0*np.pi)+np.log(dy**2+jit**2))
return -lnL
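# Illustrative sketch (not part of the original module): the negative log-likelihoods
# above are typically minimised with scipy.optimize; the data below are made up.
def _example_offset_jitter_fit():
    from scipy.optimize import minimize
    f = np.zeros(20)                        # model prediction
    y = np.random.normal(0.0, 0.6, 20)      # fake observations
    dy = np.full(20, 0.5)                   # formal errors
    res = minimize(fit_linear_offset_jitter, x0=np.array([0.0, 0.1]),
                   args=(f, y, dy), method='Nelder-Mead')
    return res.x                            # best-fit [offset, jitter]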
@nb.njit(cache=True,error_model='numpy')
def _coeff_mat(x, deg):
mat_ = np.zeros(shape=(x.shape[0],deg + 1))
const = np.ones_like(x)
mat_[:,0] = const
mat_[:, 1] = x
if deg > 1:
for n in range(2, deg + 1):
mat_[:, n] = x**n
return mat_
@nb.njit(cache=True,error_model='numpy')
def _fit_x(a, b):
# linalg solves ax = b
det_ = np.linalg.lstsq(a, b)[0]
return det_
@nb.njit(cache=True,error_model='numpy')
def fit_poly(x, y, deg,w):
a = _coeff_mat(x, deg)*w.reshape(-1,1)
p = _fit_x(a, y*w)
# Reverse order so p[0] is coefficient of highest order
return p[::-1]
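# Illustrative sketch (not part of the original module): weighted quadratic fit to
# exactly quadratic data; coefficients return highest order first, as in np.polyfit.
def _example_fit_poly():
    x = np.linspace(-1.0, 1.0, 50)
    y = 2.0 * x**2 - 0.5 * x + 1.0
    w = np.ones_like(x)          # uniform weights
    return fit_poly(x, y, 2, w)  # ~ [2.0, -0.5, 1.0]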
#####################################################
############# UTILITIES #############################
#####################################################
@nb.njit(cache=True,error_model='numpy')
def gaussian(x, amplitude, mean, stddev):
return amplitude * np.exp(-(x-mean)**2/(2*stddev**2))
@nb.njit(cache=True,error_model='numpy')
def gaussian2(x, amplitude, mean, stddev,C):
return C + amplitude * np.exp(-(x-mean)**2/(2*stddev**2))
@nb.njit(cache=True,error_model='numpy')
def normalize_spectra_nb(bins,wavelength,flux):
x_bin=np.zeros(len(bins)-1)
y_bin=np.zeros(len(bins)-1)
for i in range(len(bins)-1):
idxup = wavelength>bins[i]
idxdown= wavelength<bins[i+1]
idx=idxup & idxdown
y_bin[i]=flux[idx].max()
x_bin[i]=wavelength[idx][np.argmax(flux[idx])]
#divide by 6th deg polynomial
return x_bin, y_bin
@nb.njit(cache=True,error_model='numpy')
def interpolation_nb(xp,x,y,left=0,right=0):
# Create result array
yp=np.zeros(len(xp))
minx=x[0]
maxx=x[-1]
lastidx=1
for i,xi in enumerate(xp):
if xi<minx: #extrapolate left
yp[i]=left
elif xi>maxx: #extrapolate right
yp[i]=right
else:
            for j in range(lastidx,len(x)): # since x is sorted, start from the previous index rather than scanning all of x
if x[j]>xi:
                    #Found the first x greater than xi; use x[j] and x[j-1] to interpolate
yp[i]=y[j-1]+(xi-x[j-1])*(y[j]-y[j-1])/(x[j]-x[j-1])
lastidx=j
break
return yp
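# Illustrative sketch (not part of the original module): linear interpolation on a
# sorted grid, returning the `left`/`right` constants outside the grid.
def _example_interpolation():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 2.0, 4.0])
    xp = np.array([0.5, 1.5, 3.0])
    return interpolation_nb(xp, x, y, left=0, right=0)  # -> [1.0, 3.0, 0.0]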
@nb.njit(cache=True,error_model='numpy')
def cross_correlation_nb(rv,wv,flx,wv_ref,flx_ref):
#Compute the CCF against the reference spectrum. Can be optimized.
ccf=np.zeros(len(rv)) #initialize ccf
lenf=len(flx_ref)
for i in range(len(rv)):
wvshift=wv_ref*(1.0+rv[i]/2.99792458e8) #shift ref spectrum, in m/s
# fshift=np.interp(wvshift,wv,flx)
fshift = interpolation_nb(wvshift,wv,flx,left=0,right=0)
ccf[i]=np.sum(flx_ref*fshift)/lenf #compute ccf
return (ccf-np.min(ccf))/np.max((ccf-np.min(ccf)))
@nb.njit(cache=True,error_model='numpy')
def cross_correlation_mask(rv,wv,f,wvm,fm):
ccf = np.zeros(len(rv))
lenm = len(wvm)
wvmin=wv[0]
for i in range(len(rv)):
wvshift=wvm*(1.0+rv[i]/2.99792458e8) #shift ref spectrum, in m/s
#for each mask line
for j in range(lenm):
#find wavelengths right and left of the line.
wvline=wvshift[j]
if wvline<3000.0:
idxlf = int((wvline-wvmin)/0.1)
elif wvline<4999.986:
if wvmin<3000.0:
idxlf = np.round(int((3000.0-wvmin)/0.1)) + int((wvline-3000.0)/0.006)
else:
idxlf = int((wvline-wvmin)/0.006)
elif wvline<5000.0:
if wvmin<3000.0:
idxlf = np.round(int((3000.0-wvmin)/0.1)) + int((4999.986-3000.0)/0.006) + 1
else:
idxlf = int((4999.986-wvmin)/0.006) + 1
elif wvline<10000.0:
if wvmin<3000.0:
idxlf = np.round(int((3000.0-wvmin)/0.1)) + int((4999.986-3000.0)/0.006) + 1 + int((wvline-5000.0)/0.01)
elif wvmin<4999.986:
idxlf = int((4999.986-wvmin)/0.006) + 1 + int((wvline-5000.0)/0.01)
else:
idxlf = int((wvline-wvmin)/0.01)
elif wvline<15000.0:
if wvmin<3000.0:
idxlf = np.round(int((3000.0-wvmin)/0.1)) + int((4999.986-3000.0)/0.006) + 1 + int((10000.0-5000.0)/0.01) + int((wvline-10000.0)/0.02)
elif wvmin<4999.986:
idxlf = int((4999.986-wvmin)/0.006) + 1 + int((10000-5000.0)/0.01) + int((wvline-10000.0)/0.02)
elif wvmin<10000.0:
idxlf = int((10000.0-wvmin)/0.01) + int((wvline-10000.0)/0.02)
else:
idxlf = int((wvline-wvmin)/0.02)
else:
if wvmin<3000.0:
idxlf = np.round(int((3000.0-wvmin)/0.1)) + int((4999.986-3000.0)/0.006) + 1 + int((10000.0-5000.0)/0.01) + int((15000.0-10000.0)/0.02) + int((wvline-15000.0)/0.03)
elif wvmin<4999.986:
idxlf = int((4999.986-wvmin)/0.006) + 1 + int((10000-5000.0)/0.01) + int((15000-10000.0)/0.02) + int((wvline-15000.0)/0.03)
elif wvmin<10000.0:
idxlf = int((10000.0-wvmin)/0.01) + int((15000-10000.0)/0.02) + int((wvline-15000.0)/0.03)
elif wvmin<15000.0:
idxlf = int((15000-wvmin)/0.02) + int((wvline-15000.0)/0.03)
else:
idxlf = int((wvline-wvmin)/0.03)
idxrg = idxlf + 1
diffwv=wv[idxrg]-wv[idxlf] #pixel size in wavelength
midpix=(wv[idxrg]+wv[idxlf])/2 #wavelength between the two pixels
leftmask = wvline - diffwv/2 #left edge of the mask
rightmask = wvline + diffwv/2 #right edge of the mask
frac1 = (midpix - leftmask)/diffwv #fraction of the mask ovelapping the left pixel
frac2 = (rightmask - midpix)/diffwv #fraction of the mask overlapping the right pixel
midleft = (leftmask + midpix)/2 #central left overlapp
midright = (rightmask + midpix)/2 #central wv right overlap
f1 = f[idxlf] + (midleft-wv[idxlf])*(f[idxrg]-f[idxlf])/(diffwv)
f2 = f[idxlf] + (midright-wv[idxlf])*(f[idxrg]-f[idxlf])/(diffwv)
ccf[i]=ccf[i] - f1*fm[j]*frac1 - f2*fm[j]*frac2
return (ccf-np.min(ccf))/np.max((ccf-np.min(ccf)))
@nb.njit(cache=True,error_model='numpy')
def weight_mask(wvi,wvf,o_weight,wvm,fm):
j=0
maxj=len(wvi)
for i in range(len(wvm)):
if wvm[i]<wvi[j]:
fm[i]=0.0
elif wvm[i]>=wvi[j] and wvm[i]<=wvf[j]:
fm[i]=fm[i]*o_weight[j]
elif wvm[i]>wvf[j]:
j+=1
if j>=maxj:
fm[i]=0.0
break
else:
i-=1
return wvm, fm
@nb.njit(cache=True,error_model='numpy')
def polar2colatitude_nb(r,a,i):
    '''Takes the polar coordinates and the inclination i (measured with respect to the
    north pole; i=0 produces transits, i.e. 90-(inclination defined in exoplanets)).
    Returns the colatitude on the star (90-latitude).
    '''
a=a*m.pi/180.
i=-i #negative to make the rotation toward the observer.
theta=m.acos(r*m.sin(a)*m.cos(i)-m.sin(i)*m.sqrt(1-r*r))
return theta
@nb.njit(cache=True,error_model='numpy')
def polar2longitude_nb(r,a,i):
    '''Takes the polar coordinates and the inclination i (measured with respect to the
    north pole; i=0 produces transits, i.e. 90-(inclination defined in exoplanets)).
    Returns the longitude on the star (from -90 to 90).
    '''
a=a*m.pi/180.
i=-i #negative to make the rotation toward the observer.
    h=m.sqrt((1.-(r*m.cos(a))**2.)/(m.tan(i)**2.+1.)) #height of the terminator (long=pi/2)
if r*np.sin(a)>h:
phi=m.asin(-r*m.cos(a)/m.sqrt(1.-(r*m.sin(a)*m.cos(i)-m.sin(i)*m.sqrt(1.-r*r))**2.))+m.pi #to correct for mirroring of longitudes in the terminator
else:
phi=m.asin(r*m.cos(a)/m.sqrt(1.-(r*m.sin(a)*m.cos(i)-m.sin(i)*m.sqrt(1.-r*r))**2.))
return phi
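# Illustrative sketch (not part of the original module): the centre of the visible
# disc (r=0, a=0) of a star with i=0 maps to the equator (colatitude pi/2) at longitude 0.
def _example_disc_centre():
    return polar2colatitude_nb(0.0, 0.0, 0.0), polar2longitude_nb(0.0, 0.0, 0.0)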
@nb.njit(cache=True,error_model='numpy')
def speed_bisector_nb(rv,ccf,integrated_bis):
    ''' Compute the bisector of the CCF (midpoints between the left and right
    wings at a grid of CCF heights).
    '''
idxmax=ccf.argmax()
maxccf=ccf[idxmax]
maxrv=rv[idxmax]
xnew = rv
ynew = ccf
cutleft=0
cutright=len(ynew)-1
# if not integrated_bis: #cut the CCF at the minimum of the wings only for reference CCF, if not there are errors.
for i in range(len(ynew)):
if xnew[i]>maxrv:
if ynew[i]>ynew[i-1]:
cutright=i
break
for i in range(len(ynew)):
if xnew[-1-i]<maxrv:
if ynew[-1-i]>ynew[-i]:
cutleft=len(ynew)-i
break
xnew=xnew[cutleft:cutright]
ynew=ynew[cutleft:cutright]
minright=np.min(ynew[xnew>maxrv])
minleft=np.min(ynew[xnew<maxrv])
minccf=np.max(np.array([minright,minleft]))
    ybis=np.linspace(minccf+0.01*(maxccf-minccf),0.999*maxccf,50) #from just above the wing minimum up to 99.9% of the maximum
xbis=np.zeros(len(ybis))
for i in range(len(ybis)):
for j in range(len(ynew)-1):
if ynew[j]<ybis[i] and ynew[j+1]>ybis[i] and xnew[j]<maxrv:
rv1=xnew[j]+(xnew[j+1]-xnew[j])*(ybis[i]-ynew[j])/(ynew[j+1]-ynew[j])
if ynew[j]>ybis[i] and ynew[j+1]<ybis[i] and xnew[j+1]>maxrv:
rv2=xnew[j]+(xnew[j+1]-xnew[j])*(ybis[i]-ynew[j])/(ynew[j+1]-ynew[j])
xbis[i]=(rv1+rv2)/2.0 #bisector
# xbis[-1]=maxrv #at the top should be max RV
return cutleft,cutright,xbis,ybis
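# Illustrative sketch (not part of the original module): the bisector of a symmetric
# (Gaussian) CCF should stay essentially vertical at the line centre.
def _example_bisector():
    rv = np.linspace(-10.0, 10.0, 201)
    ccf = np.exp(-rv**2 / 8.0)
    return speed_bisector_nb(rv, ccf, False)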
@nb.njit(cache=True,error_model='numpy')
def limb_darkening_law(LD_law,LD1,LD2,amu):
if LD_law == 'linear':
mu=1-LD1*(1-amu)
elif LD_law == 'quadratic':
a=2*np.sqrt(LD1)*LD2
b=np.sqrt(LD1)*(1-2*LD2)
mu=1-a*(1-amu)-b*(1-amu)**2
elif LD_law == 'sqrt':
a=np.sqrt(LD1)*(1-2*LD2)
b=2*np.sqrt(LD1)*LD2
mu=1-a*(1-amu)-b*(1-np.sqrt(amu))
elif LD_law == 'log':
a=LD2*LD1**2+1
b=LD1**2-1
mu=1-a*(1-amu)-b*amu*(1-np.log(amu))
else:
print('LD law not valid.')
return mu
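# Illustrative sketch (not part of the original module): quadratic limb darkening from
# disc centre (amu=1) towards the limb; the coefficients are made up for demonstration.
def _example_limb_darkening():
    amu = np.array([1.0, 0.8, 0.5, 0.2, 0.05])
    return limb_darkening_law('quadratic', 0.36, 0.3, amu)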
@nb.njit(cache=True,error_model='numpy')
def compute_spot_position(t,spot_map,ref_time,Prot,diff_rot,Revo,Q):
pos=np.zeros((len(spot_map),4))
for i in range(len(spot_map)):
        tini = spot_map[i][0] #time of spot appearance
dur = spot_map[i][1] #duration of the spot
tfin = tini + dur #final time of spot
colat = spot_map[i][2] #colatitude
lat = 90 - colat #latitude
longi = spot_map[i][3] #longitude
        Rcoef = spot_map[i][4:7] #coefficients for the evolution of the radius. Depends on the desired law.
pht = longi + (t-ref_time)/Prot%1*360
#update longitude adding diff rotation
phsr= pht + (t-ref_time)*diff_rot*(1.698*m.sin(np.deg2rad(lat))**2+2.346*m.sin(np.deg2rad(lat))**4)
if Revo == 'constant':
if t>=tini and t<=tfin:
rad=Rcoef[0]
else:
rad=0.0
elif Revo == 'linear':
if t>=tini and t<=tfin:
rad=Rcoef[0]+(t-tini)*(Rcoef[1]-Rcoef[0])/dur
else:
rad=0.0
elif Revo == 'quadratic':
if t>=tini and t<=tfin:
rad=-4*Rcoef[0]/(dur*(1-2*tini))*(t-tini)*(t-tini-dur)
else:
rad=0.0
else:
            print('Spot evolution law not implemented. Available laws: constant, linear, quadratic.')
if Q!=0.0: #to speed up the code when no fac are present
rad_fac=np.deg2rad(rad)*m.sqrt(1+Q)
else: rad_fac=0.0
pos[i]=np.array([np.deg2rad(colat), np.deg2rad(phsr), | np.deg2rad(rad) | numpy.deg2rad |
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldnt change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldnt change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class AdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
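# with scale=0.2*255 (~51) around a base value of 128, at least one of the
# 1000 samples is expected to be clipped down to 0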
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74  # loc=0.25*255 should shift the average by roughly 255*0.25 ~= 64
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
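# each p in the list should be sampled roughly equally often (~1/3 of iterations each)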
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
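# a stochastic parameter given as p is sampled directly as the keep mask:
# Binomial(1-0.0)=Binomial(1.0) keeps every pixel, Binomial(1-0.5)=Binomial(0.5) drops roughly half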
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
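# p is the drop probability; internally it is stored as a Binomial over the keep probability (1-p)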
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
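# like Dropout2d, TotalDropout stores p internally as a Binomial over the keep probability (1-p)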
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
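# unsigned dtypes clip negative products to 0, signed dtypes keep the sign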
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
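# a per-pixel multiplier drawn from (0.5, 1.5) should almost never give two consecutive pixels the same value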
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
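# without per_channel a single multiplier is sampled per pixel location and applied to all channels, so the channels stay identical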
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
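# fraction of pixels replaced over all iterations; a Binomial(p=0.5) mask should replace roughly half of them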
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
def test_per_channel(self):
# test channelwise
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
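# with mask=0.5 and per_channel=True, some channels keep the original 0 while others are replaced by values from [1, 10]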
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
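# size_px=10 yields a coarse mask that is upscaled to 100x100, so the replaced fraction varies much more between runs than with size_px=100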
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.1
for nb_seen in hist:
density = nb_seen / len(ps)
# p is drawn uniformly from (0.0, 1.0), so each bin should hold roughly 1.0/nb_bins of the samples
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyway
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.1
for nb_seen in hist:
density = nb_seen / len(ps)
# p is drawn uniformly from (0.0, 1.0), so each bin should hold roughly 1.0/nb_bins of the samples
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Pepper is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.1
for nb_seen in hist:
density = nb_seen / len(ps)
# p is drawn uniformly from (0.0, 1.0), so each bin should hold roughly 1.0/nb_bins of the samples
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarsePepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class Test_invert(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] is None
assert args[1]["max_value"] is None
assert args[1]["threshold"] is None
assert args[1]["invert_above_threshold"] is True
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr, min_value=1, max_value=10, threshold=5,
invert_above_threshold=False)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] == 1
assert args[1]["max_value"] == 10
assert args[1]["threshold"] == 5
assert args[1]["invert_above_threshold"] is False
def test_uint8(self):
values = np.array([0, 20, 45, 60, 128, 255], dtype=np.uint8)
expected = np.array([
255,
255-20,
255-45,
255-60,
255-128,
255-255
], dtype=np.uint8)
observed = iaa.invert(values)
assert np.array_equal(observed, expected)
assert observed is not values
# most parts of this function are tested via Invert
class Test_invert_(unittest.TestCase):
def test_arr_is_noncontiguous_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_flipped = np.fliplr(np.copy(zeros + 255))
observed = iaa.invert_(max_vr_flipped)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_arr_is_view_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_view = np.copy(zeros + 255)[:, :, [0, 2]]
observed = iaa.invert_(max_vr_view)
expected = zeros[:, :, [0, 2]]
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values))
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_0_inv_above(self):
threshold = 0
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_255_inv_above(self):
threshold = 255
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_256_inv_above(self):
threshold = 256
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above_with_min_max(self):
threshold = 50
# uint64 does not support custom min/max, hence removed it here
dtypes = ["uint8", "uint16", "uint32"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
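# expected: values above the threshold are inverted within [min_value, max_value] = [10, 100], i.e. mapped to min + max - clip(v, 10, 100); values below the threshold stay untouched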
expected = np.array([
0, # not clipped to 10 as only >thresh affected
20,
45,
100 - 50,
100 - 90,
100 - 90
], dtype=dt)
observed = iaa.invert_(np.copy(values),
min_value=10,
max_value=100,
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
-45,
-20,
center_value,
20,
45,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
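# for signed integer dtypes min_value + max_value == -1, so a full inversion maps v to -1 - v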
expected = np.array([
(-1) * (-45) - 1,
(-1) * (-20) - 1,
(-1) * center_value - 1,
(-1) * 20 - 1,
(-1) * 45 - 1,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_float_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
-45.5,
-20.5,
center_value,
20.5,
45.5,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
def test_float_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
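# float dtypes have a value range symmetric around zero, so inversion maps v to -v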
expected = np.array([
(-1) * (-45.5),
(-1) * (-20.5),
(-1) * center_value,
(-1) * 20.5,
(-1) * 45.5,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
class Test_solarize(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 5
assert observed == "foo"
def test_uint8(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
observed = iaa.solarize(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_compare_with_pil(self):
import PIL.Image
import PIL.ImageOps
def _solarize_pil(image, threshold):
img = PIL.Image.fromarray(image)
return np.asarray(PIL.ImageOps.solarize(img, threshold))
image = np.mod(np.arange(20*20*3), 255).astype(np.uint8)\
.reshape((20, 20, 3))
for threshold in np.arange(256):
image_pil = _solarize_pil(image, threshold)
image_iaa = iaa.solarize(image, threshold)
assert np.array_equal(image_pil, image_iaa)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 5
assert observed == "foo"
class TestInvert(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_one(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_p_is_zero(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
expected = zeros + 255
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 200)
expected = zeros + 100
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 100)
expected = zeros + 200
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set_with_float_image(self):
# with min/max and float inputs
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
zeros_f32 = zeros.astype(np.float32)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 200)
expected = zeros_f32 + 100
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 100)
expected = zeros_f32 + 200
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
def test_p_is_80_percent(self):
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=0.8)
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=iap.Binomial(0.8))
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
def test_per_channel(self):
aug = iaa.Invert(p=0.5, per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 255
observed = aug.augment_image(img)
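# with p=0.5 and per_channel=True over 100 channels, some channels are inverted to 0 and others stay 255, giving exactly two unique values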
assert len(np.unique(observed)) == 2
# TODO split into two tests
def test_p_is_stochastic_parameter_per_channel_is_probability(self):
nb_iterations = 1000
aug = iaa.Invert(p=iap.Binomial(0.8), per_channel=0.7)
img = np.zeros((1, 1, 20), dtype=np.uint8) + 255
seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) == 2:
seen[1] += 1
else:
assert False
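# per_channel=0.7: roughly 30% of images are inverted (or not) as a whole -> one unique value; roughly 70% are handled per channel -> almost always both 0 and 255 occur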
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
def test_threshold(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=True)
observed = aug.augment_image(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_threshold_inv_below(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=False)
observed = aug.augment_image(arr)
expected = np.array([255-0, 255-10, 255-50, 150, 200, 255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
aug = iaa.Invert(p=1.0)
aug_det = iaa.Invert(p=1.0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Invert(p="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Invert(p=0.5, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Invert(1.0)
image_aug = aug(image=image)
assert np.all(image_aug == 255)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Invert(1.0)
image_aug = aug(image=image)
assert np.all(image_aug == 255)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Invert(p=0.5, per_channel=False, min_value=10, max_value=20)
params = aug.get_parameters()
assert params[0] is aug.p
assert params[1] is aug.per_channel
assert params[2] == 10
assert params[3] == 20
assert params[4] is aug.threshold
assert params[5] is aug.invert_above_threshold
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Invert(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_p_is_zero(self):
# with p=0.0
aug = iaa.Invert(p=0.0)
dtypes = [bool,
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, np.float128]
for dtype in dtypes:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
kind = np.dtype(dtype).kind
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert np.all(image_min_aug == image_min)
assert np.all(image_max_aug == image_max)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_min)
assert np.array_equal(image_center_aug, image_center)
assert np.array_equal(image_max_aug, image_max)
else:
assert np.allclose(image_min_aug, image_min)
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_max)
def test_other_dtypes_p_is_one(self):
# with p=1.0
aug = iaa.Invert(p=1.0)
dtypes = [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, np.float128]
for dtype in dtypes:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
kind = np.dtype(dtype).kind
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert np.all(image_min_aug == image_max)
assert np.all(image_max_aug == image_min)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_max)
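# the center value is not exactly representable for integer dtypes, so its inversion can be off by one, hence atol slightly above 1.0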
assert np.allclose(image_center_aug, image_center, atol=1.0+1e-4, rtol=0)
assert np.array_equal(image_max_aug, image_min)
else:
assert np.allclose(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_min)
def test_other_dtypes_p_is_one_with_min_value(self):
# with p=1.0 and min_value
aug = iaa.Invert(p=1.0, min_value=1)
dtypes = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32,
np.float16, np.float32]
for dtype in dtypes:
_min_value, _center_value, max_value = iadt.get_value_range_of_dtype(dtype)
min_value = 1
kind = np.dtype(dtype).kind
center_value = min_value + 0.5 * (max_value - min_value)
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert np.all(image_min_aug == 1)
assert np.all(image_max_aug == 1)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center, atol=1.0+1e-4, rtol=0)
assert np.array_equal(image_max_aug, image_min)
else:
assert np.allclose(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_min)
def test_other_dtypes_p_is_one_with_max_value(self):
# with p=1.0 and max_value
aug = iaa.Invert(p=1.0, max_value=16)
dtypes = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32,
np.float16, np.float32]
for dtype in dtypes:
min_value, _center_value, _max_value = iadt.get_value_range_of_dtype(dtype)
max_value = 16
kind = np.dtype(dtype).kind
center_value = min_value + 0.5 * (max_value - min_value)
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert not np.any(image_min_aug == 1)
assert not np.any(image_max_aug == 1)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center, atol=1.0+1e-4, rtol=0)
assert np.array_equal(image_max_aug, image_min)
else:
assert np.allclose(image_min_aug, image_max)
if dtype is np.float16:
# for float16, this is off by about 10
assert np.allclose(image_center_aug, image_center, atol=0.001*np.finfo(dtype).max)
else:
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_min)
def test_pickleable(self):
aug = iaa.Invert(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20, shape=(2, 2, 5))
class TestSolarize(unittest.TestCase):
def test_p_is_one(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Solarize(p=1.0).augment_image(zeros)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_p_is_one_some_values_above_threshold(self):
arr = np.array([0, 99, 111, 200]).astype(np.uint8).reshape((2, 2, 1))
observed = iaa.Solarize(p=1.0, threshold=(100, 110))(image=arr)
expected = np.array([0, 99, 255-111, 255-200])\
.astype(np.uint8).reshape((2, 2, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
class TestContrastNormalization(unittest.TestCase):
@unittest.skipIf(sys.version_info[0] <= 2,
"Warning is not generated in 2.7 on travis, but locally "
"in 2.7 it is?!")
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
aug = arithmetic_lib.ContrastNormalization((0.9, 1.1))
assert isinstance(aug, contrast_lib._ContrastFuncWrapper)
assert len(caught_warnings) == 1
assert (
"deprecated"
in str(caught_warnings[-1].message)
)
# TODO use this in test_contrast.py or remove it?
"""
def deactivated_test_ContrastNormalization():
reseed()
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
# contrast stays the same
observed = iaa.ContrastNormalization(alpha=1.0).augment_image(zeros + 50)
expected = zeros + 50
assert np.array_equal(observed, expected)
# image with mean intensity (ie 128), contrast cannot be changed
observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128)
expected = zeros + 128
assert np.array_equal(observed, expected)
# increase contrast
observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128 + 10)
expected = zeros + 128 + 20
assert np.array_equal(observed, expected)
observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128 - 10)
expected = zeros + 128 - 20
assert np.array_equal(observed, expected)
# decrease contrast
observed = iaa.ContrastNormalization(alpha=0.5).augment_image(zeros + 128 + 10)
expected = zeros + 128 + 5
assert np.array_equal(observed, expected)
observed = iaa.ContrastNormalization(alpha=0.5).augment_image(zeros + 128 - 10)
expected = zeros + 128 - 5
assert np.array_equal(observed, expected)
# increase contrast by stochastic parameter
observed = iaa.ContrastNormalization(alpha=iap.Choice([2.0, 3.0])).augment_image(zeros + 128 + 10)
expected1 = zeros + 128 + 20
expected2 = zeros + 128 + 30
assert np.array_equal(observed, expected1) or np.array_equal(observed, expected2)
# change contrast by tuple
nb_iterations = 1000
nb_changed = 0
last = None
for i in sm.xrange(nb_iterations):
observed = iaa.ContrastNormalization(alpha=(0.5, 2.0)).augment_image(zeros + 128 + 40)
if last is None:
last = observed
else:
if not np.array_equal(observed, last):
nb_changed += 1
p_changed = nb_changed / (nb_iterations-1)
assert p_changed > 0.5
# per_channel=True
aug = iaa.ContrastNormalization(alpha=(1.0, 6.0), per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 128 + 10
observed = aug.augment_image(img)
uq = np.unique(observed)
assert len(uq) > 5
# per_channel with probability
aug = iaa.ContrastNormalization(alpha=(1.0, 4.0), per_channel=0.7)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 128 + 10
seen = [0, 0]
for _ in sm.xrange(1000):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) >= 2:
seen[1] += 1
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
# keypoints shouldnt be changed
aug = iaa.ContrastNormalization(alpha=2.0)
aug_det = iaa.ContrastNormalization(alpha=2.0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.ContrastNormalization(alpha="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.ContrastNormalization(alpha=1.5, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.ContrastNormalization(alpha=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
# test heatmaps (not affected by augmenter)
aug = iaa.ContrastNormalization(alpha=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
"""
class TestJpegCompression(unittest.TestCase):
def setUp(self):
reseed()
def test_compression_is_zero(self):
# basic test at 0 compression
img = ia.quokka(extract="square", size=(64, 64))
aug = iaa.JpegCompression(0)
img_aug = aug.augment_image(img)
diff = np.average(np.abs(img.astype(np.float32) - img_aug.astype(np.float32)))
assert diff < 1.0
def test_compression_is_90(self):
# basic test at 90 compression
img = ia.quokka(extract="square", size=(64, 64))
aug = iaa.JpegCompression(90)
img_aug = aug.augment_image(img)
diff = np.average(np.abs(img.astype(np.float32) - img_aug.astype(np.float32)))
assert 1.0 < diff < 50.0
def test___init__(self):
aug = iaa.JpegCompression([0, 100])
assert isinstance(aug.compression, iap.Choice)
assert len(aug.compression.a) == 2
assert aug.compression.a[0] == 0
assert aug.compression.a[1] == 100
def test_get_parameters(self):
aug = iaa.JpegCompression([0, 100])
assert len(aug.get_parameters()) == 1
assert aug.get_parameters()[0] == aug.compression
def test_compression_is_stochastic_parameter(self):
# test if stochastic parameters are used by augmentation
img = ia.quokka(extract="square", size=(64, 64))
class _TwoValueParam(iap.StochasticParameter):
def __init__(self, v1, v2):
super(_TwoValueParam, self).__init__()
self.v1 = v1
self.v2 = v2
def _draw_samples(self, size, random_state):
arr = np.full(size, self.v1, dtype=np.float32)
arr[1::2] = self.v2
return arr
param = _TwoValueParam(0, 100)
aug = iaa.JpegCompression(param)
img_aug_c0 = iaa.JpegCompression(0).augment_image(img)
img_aug_c100 = iaa.JpegCompression(100).augment_image(img)
imgs_aug = aug.augment_images([img] * 4)
        assert np.array_equal(imgs_aug[0], img_aug_c0)
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Train a network with Detectron."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import logging
import numpy as np
import pprint
import sys
from caffe2.python import workspace
from detectron.datasets.json_dataset import JsonDataset
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.core.config import merge_cfg_from_list
from detectron.datasets.roidb import flipped_roidb_for_training,extend_with_flipped_entries,get_training_roidb
from detectron.core.test_engine import run_inference
from detectron.utils.logging import setup_logging
import detectron.utils.c2 as c2_utils
import detectron.utils.train
from bitmap import BitMap
from detectron.utils.helper import *
import pickle
c2_utils.import_contrib_ops()
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def parse_args():
parser = argparse.ArgumentParser(
description='Train a network with Detectron'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file for training (and optionally testing)',
default=None,
type=str
)
parser.add_argument(
'--multi-gpu-testing',
dest='multi_gpu_testing',
help='Use cfg.NUM_GPUS GPUs for inference',
action='store_true'
)
parser.add_argument(
'--skip-test',
dest='skip_test',
help='Do not test the final model',
action='store_true'
)
parser.add_argument(
'opts',
help='See lib/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
# Initialize C2
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1']
)
# Set up logging and load config options
logger = setup_logging(__name__)
logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
args = parse_args()
logger.info('Called with args:')
logger.info(args)
if args.cfg_file is not None:
merge_cfg_from_file(args.cfg_file)
if args.opts is not None:
merge_cfg_from_list(args.opts)
assert_and_infer_cfg()
logger.info('Training with config:')
logger.info(pprint.pformat(cfg))
# Note that while we set the numpy random seed network training will not be
# deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
# non-deterministic cudnn functions).
np.random.seed(cfg.RNG_SEED)
# Execute the training run
fs = open('imgnames.pkl','rb')
roidbnames = pickle.load(fs)
fs.close()
logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))
    dataset_names = cfg.TRAIN.DATASETS
    proposal_files = cfg.TRAIN.PROPOSAL_FILES
roidb = get_training_roidb(dataset_names,proposal_files)
logger.info('{:d} roidb entries'.format(len(roidb)))
total_num = len(roidb)
    # bitmap of indices marking which roidb entries are currently used for training
bitmapRoidb = BitMap(total_num)
# initial samples
# initial_num = int(total_num*0.2)
# for i in range(initial_num):
# bitmapRoidb.set(i)
#
# train_roidb = [roidb[i] for i in range(initial_num)]
initialidx = []
train_roidb =[]
for i,x in enumerate(roidb):
if x['image'].split('/')[-1] in roidbnames:
initialidx.append(i)
train_roidb.append(x)
for i in initialidx:
bitmapRoidb.set(i)
    logger.info('{:d} initial roidb entries'.format(len(train_roidb)))
# append flipped images
train_roidb = flipped_roidb_for_training(train_roidb)
    logger.info('{:d} roidb entries after appending flipped images'.format(len(train_roidb)))
    alamount = 0
    ssamount = 0
    gamma = 0.95
    # control the active-learning (AL) proportion
    al_proportion_checkpoint = [int(x*total_num*0.4) for x in np.linspace(0.2, 1, 10)]
import os
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
from matplotlib import pyplot as plt
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
file = pd.read_csv(filename)
new_df = pd.DataFrame(file)
new_df = new_df.dropna()
new_df.drop(columns=['id'], inplace=True)
new_df.drop(columns=['date'], inplace=True)
new_df = new_df[new_df['price'] > 0]
new_df = new_df[new_df['bathrooms'] > 0]
new_df = new_df[new_df['sqft_lot15'] > 0]
return new_df
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
output_path: str = "C:/Users/nogaz/PycharmProjects/IML.HUJI/graphs/ex2") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
X = X.join(pd.DataFrame({'price': y}))
covx = X.cov()
for feature in X.columns:
fig = plt.figure()
fig.clear()
        # use ddof=1 so the std is consistent with pandas' sample covariance (ddof=1)
        pear = covx[feature]['price'] / (np.std(X[feature], ddof=1) * np.std(X['price'], ddof=1))
        title = ("Pearson Correlation between "
                 + str(feature) + " and price = " + str(pear))
plt.scatter(X[feature], y)
plt.xlabel(str(feature))
plt.ylabel('price')
plt.title(title)
plt.savefig(output_path + "/{}.png".format(feature))
plt.close()
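# Note (illustrative cross-check): inside the loop above, the same sample Pearson
# coefficient could also be obtained directly with
#     np.corrcoef(X[feature], X['price'])[0, 1]
# which is independent of the ddof convention used for cov/std.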
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
df = load_data(r'C:\Users\nogaz\PycharmProjects\IML.HUJI\datasets\house_prices.csv')
price = df['price']
df = df.drop('price', axis=1)
# Question 2 - Feature evaluation with respect to response
feature_evaluation(df, price)
# Question 3 - Split samples into training- and testing sets.
tr_x, tr_y, te_x, te_y = split_train_test(df, price)
# Question 4 - Fit model over increasing percentages of the overall training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
mean_loss = []
var_loss = []
model = LinearRegression()
samp_size = np.linspace(10, 100, num=90)
for i in range(10, 100):
f = i / 100
loss_i = np.ones(10, )
for j in range(10):
cur_tr_x = tr_x.sample(frac=f)
cur_tr_y = tr_y.loc[cur_tr_x.index]
model._fit(cur_tr_x.values, cur_tr_y.values)
cur_loss = model._loss(te_x.values, te_y.values)
loss_i[j] = cur_loss
mean_loss.append(np.mean(loss_i))
var_loss.append(np.std(loss_i))
fig = go.Figure(
[go.Scatter(x=samp_size, y=np.array(mean_loss) - (2 * np.array(var_loss)), fill=None, mode="lines", line=dict(color="lightgrey"),
showlegend=False),
         go.Scatter(x=samp_size, y=np.array(mean_loss) + (2 * np.array(var_loss)
from matplotlib.testing import setup
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import matplotlib as mpl
import packaging.version
import pytest
import animatplot as amp
from tests.tools import animation_compare
from animatplot.blocks import Block, Title
setup()
class TestTitleBlock:
def test_list_of_str(self):
labels = ['timestep 0', 'timestep 1']
result = Title(labels)
assert labels == result.titles
assert len(result) == 2
def test_invalid_input(self):
with pytest.raises(TypeError):
Title(0)
with pytest.raises(TypeError):
Title([6, 7])
def test_format_str(self):
actual = Title('timestep {num}', num=[1, 2]).titles
assert actual == ['timestep 1', 'timestep 2']
actual = Title('timestep {num}', num=[1]).titles
assert actual == ['timestep 1']
def test_no_replacements(self):
actual = Title('Name').titles
assert actual == ['Name']
def test_multiple_replacements(self):
actual = Title('timestep {num}, max density {n}',
num=[1, 2], n=[500, 10]).titles
expected = ['timestep {num}, max density {n}'.format(num=1, n=500),
'timestep {num}, max density {n}'.format(num=2, n=10)]
assert actual == expected
def test_string_formatting(self):
actual = Title('timestep {values:.2f}', values=[5e7]).titles
assert actual == ['timestep 50000000.00']
def test_format_str_numpy_arrays(self):
actual = Title('timestep {num}', num=np.array([1, 2])).titles
assert actual == ['timestep 1', 'timestep 2']
# Hypothesis test that the strings are always formatted correctly?
def test_text(self):
# TODO test that the right type of object is produced?
title_block = Title('timestep {num}', num=[1, 2])
ax = plt.gca()
assert ax.get_title() == 'timestep 1'
title_block._update(1)
assert ax.get_title() == 'timestep 2'
plt.close('all')
def test_mpl_kwargs(self):
expected = {'loc': 'left', 'fontstyle': 'italic'}
actual = Title('timestep {num}', num=[1, 2], **expected)
assert actual._mpl_kwargs == expected
def assert_jagged_arrays_equal(x, y):
for x, y in zip(x, y):
npt.assert_equal(x, y)
class TestLineBlock:
def test_2d_inputs(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x_grid, y_data)
assert isinstance(line_block, amp.blocks.Line)
npt.assert_equal(line_block.x, x_grid)
npt.assert_equal(line_block.y, y_data)
assert len(line_block) == len(t)
assert isinstance(line_block.line, mpl.lines.Line2D)
xdata, ydata = line_block.line.get_data()
npt.assert_equal(xdata, x)
npt.assert_equal(ydata, y_data[0, :])
def test_update(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x_grid, y_data)
line_block._update(frame=1)
npt.assert_equal(line_block.line.get_xdata(), x)
npt.assert_equal(line_block.line.get_ydata(), y_data[1, :])
def test_constant_x(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x, y_data)
npt.assert_equal(line_block.line.get_xdata(), x)
npt.assert_equal(line_block.x[-1], x)
def test_no_x_input(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(y_data)
expected_x = np.arange(10)
npt.assert_equal(line_block.line.get_xdata(), expected_x)
def test_list_input(self):
x_data = [np.array([1, 2, 3]), np.array([1, 2, 3])]
y_data = [np.array([5, 6, 7]), np.array([4, 2, 9])]
line_block = amp.blocks.Line(x_data, y_data)
npt.assert_equal(line_block.y, np.array([[5, 6, 7], [4, 2, 9]]))
        npt.assert_equal(line_block.x, np.array([[1, 2, 3], [1, 2, 3]]))
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
domain decomposition COSMO
See also the code on github
https://github.com/filippolipparini/ddPCM
and the papers
[1] Domain decomposition for implicit solvation models.
<NAME>, <NAME>, <NAME>
J. Chem. Phys., 139, 054111 (2013)
http://dx.doi.org/10.1063/1.4816767
[2] Fast Domain Decomposition Algorithm for Continuum Solvation Models: Energy and First Derivatives.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>
J. Chem. Theory Comput., 9, 3637-3648 (2013)
http://dx.doi.org/10.1021/ct400280b
[3] Quantum, classical, and hybrid QM/MM calculations in solution: General implementation of the ddCOSMO linear scaling strategy.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, J.-P.Piquemal, <NAME>, <NAME>
J. Chem. Phys., 141, 184108 (2014)
http://dx.doi.org/10.1063/1.4901304
-- Dielectric constants (from https://gaussian.com/scrf/) --
More dataset can be found in Minnesota Solvent Descriptor Database
(https://comp.chem.umn.edu/solvation)
Water 78.3553
Acetonitrile 35.688
Methanol 32.613
Ethanol 24.852
IsoQuinoline 11.00
Quinoline 9.16
Chloroform 4.7113
DiethylEther 4.2400
Dichloromethane 8.93
DiChloroEthane 10.125
CarbonTetraChloride 2.2280
Benzene 2.2706
Toluene 2.3741
ChloroBenzene 5.6968
NitroMethane 36.562
Heptane 1.9113
CycloHexane 2.0165
Aniline 6.8882
Acetone 20.493
TetraHydroFuran 7.4257
DiMethylSulfoxide 46.826
Argon 1.430
Krypton 1.519
Xenon 1.706
n-Octanol 9.8629
1,1,1-TriChloroEthane 7.0826
1,1,2-TriChloroEthane 7.1937
1,2,4-TriMethylBenzene 2.3653
1,2-DiBromoEthane 4.9313
1,2-EthaneDiol 40.245
1,4-Dioxane 2.2099
1-Bromo-2-MethylPropane 7.7792
1-BromoOctane 5.0244
1-BromoPentane 6.269
1-BromoPropane 8.0496
1-Butanol 17.332
1-ChloroHexane 5.9491
1-ChloroPentane 6.5022
1-ChloroPropane 8.3548
1-Decanol 7.5305
1-FluoroOctane 3.89
1-Heptanol 11.321
1-Hexanol 12.51
1-Hexene 2.0717
1-Hexyne 2.615
1-IodoButane 6.173
1-IodoHexaDecane 3.5338
1-IodoPentane 5.6973
1-IodoPropane 6.9626
1-NitroPropane 23.73
1-Nonanol 8.5991
1-Pentanol 15.13
1-Pentene 1.9905
1-Propanol 20.524
2,2,2-TriFluoroEthanol 26.726
2,2,4-TriMethylPentane 1.9358
2,4-DiMethylPentane 1.8939
2,4-DiMethylPyridine 9.4176
2,6-DiMethylPyridine 7.1735
2-BromoPropane 9.3610
2-Butanol 15.944
2-ChloroButane 8.3930
2-Heptanone 11.658
2-Hexanone 14.136
2-MethoxyEthanol 17.2
2-Methyl-1-Propanol 16.777
2-Methyl-2-Propanol 12.47
2-MethylPentane 1.89
2-MethylPyridine 9.9533
2-NitroPropane 25.654
2-Octanone 9.4678
2-Pentanone 15.200
2-Propanol 19.264
2-Propen-1-ol 19.011
3-MethylPyridine 11.645
3-Pentanone 16.78
4-Heptanone 12.257
4-Methyl-2-Pentanone 12.887
4-MethylPyridine 11.957
5-Nonanone 10.6
AceticAcid 6.2528
AcetoPhenone 17.44
a-ChloroToluene 6.7175
Anisole 4.2247
Benzaldehyde 18.220
BenzoNitrile 25.592
BenzylAlcohol 12.457
BromoBenzene 5.3954
BromoEthane 9.01
Bromoform 4.2488
Butanal 13.45
ButanoicAcid 2.9931
Butanone 18.246
ButanoNitrile 24.291
ButylAmine 4.6178
ButylEthanoate 4.9941
CarbonDiSulfide 2.6105
Cis-1,2-DiMethylCycloHexane 2.06
Cis-Decalin 2.2139
CycloHexanone 15.619
CycloPentane 1.9608
CycloPentanol 16.989
CycloPentanone 13.58
Decalin-mixture 2.196
DiBromomEthane 7.2273
DiButylEther 3.0473
DiEthylAmine 3.5766
DiEthylSulfide 5.723
DiIodoMethane 5.32
DiIsoPropylEther 3.38
DiMethylDiSulfide 9.6
DiPhenylEther 3.73
DiPropylAmine 2.9112
e-1,2-DiChloroEthene 2.14
e-2-Pentene 2.051
EthaneThiol 6.667
EthylBenzene 2.4339
EthylEthanoate 5.9867
EthylMethanoate 8.3310
EthylPhenylEther 4.1797
FluoroBenzene 5.42
Formamide 108.94
FormicAcid 51.1
HexanoicAcid 2.6
IodoBenzene 4.5470
IodoEthane 7.6177
IodoMethane 6.8650
IsoPropylBenzene 2.3712
m-Cresol 12.44
Mesitylene 2.2650
MethylBenzoate 6.7367
MethylButanoate 5.5607
MethylCycloHexane 2.024
MethylEthanoate 6.8615
MethylMethanoate 8.8377
MethylPropanoate 6.0777
m-Xylene 2.3478
n-ButylBenzene 2.36
n-Decane 1.9846
n-Dodecane 2.0060
n-Hexadecane 2.0402
n-Hexane 1.8819
NitroBenzene 34.809
NitroEthane 28.29
n-MethylAniline 5.9600
n-MethylFormamide-mixture 181.56
n,n-DiMethylAcetamide 37.781
n,n-DiMethylFormamide 37.219
n-Nonane 1.9605
n-Octane 1.9406
n-Pentadecane 2.0333
n-Pentane 1.8371
n-Undecane 1.9910
o-ChloroToluene 4.6331
o-Cresol 6.76
o-DiChloroBenzene 9.9949
o-NitroToluene 25.669
o-Xylene 2.5454
Pentanal 10.0
PentanoicAcid 2.6924
PentylAmine 4.2010
PentylEthanoate 4.7297
PerFluoroBenzene 2.029
p-IsoPropylToluene 2.2322
Propanal 18.5
PropanoicAcid 3.44
PropanoNitrile 29.324
PropylAmine 4.9912
PropylEthanoate 5.5205
p-Xylene 2.2705
Pyridine 12.978
sec-ButylBenzene 2.3446
tert-ButylBenzene 2.3447
TetraChloroEthene 2.268
TetraHydroThiophene-s,s-dioxide 43.962
Tetralin 2.771
Thiophene 2.7270
Thiophenol 4.2728
trans-Decalin 2.1781
TriButylPhosphate 8.1781
TriChloroEthene 3.422
TriEthylAmine 2.3832
Xylene-mixture 2.3879
z-1,2-DiChloroEthene 9.2
'''
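# Minimal usage sketch (illustrative only; the molecule and basis below are
# made-up placeholders, and the dielectric constant is taken from the table
# above):
#
#     from pyscf import gto, scf
#     from pyscf.solvent import ddcosmo
#     mol = gto.M(atom='H 0 0 0; F 0 0 0.92', basis='ccpvdz')
#     mf = ddcosmo.ddcosmo_for_scf(scf.RHF(mol))
#     mf.with_solvent.eps = 78.3553   # Water
#     mf.kernel()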
import ctypes
import copy
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import gto
from pyscf import df
from pyscf.dft import gen_grid, numint
from pyscf.data import radii
from pyscf.symm import sph
def ddcosmo_for_scf(mf, solvent_obj=None, dm=None):
'''Patch ddCOSMO to SCF (HF and DFT) method.
Kwargs:
        dm : if given, solvent does not respond to the change of density
matrix. A frozen ddCOSMO potential is added to the results.
'''
if getattr(mf, 'with_solvent', None):
if solvent_obj is not None:
mf.with_solvent = solvent_obj
return mf
oldMF = mf.__class__
if solvent_obj is None:
solvent_obj = DDCOSMO(mf.mol)
if dm is not None:
solvent_obj.epcm, solvent_obj.vpcm = solvent_obj.kernel(dm)
solvent_obj.frozen = True
class SCFWithSolvent(oldMF):
def __init__(self, mf, solvent):
self.__dict__.update(mf.__dict__)
self.with_solvent = solvent
self._keys.update(['with_solvent'])
def dump_flags(self, verbose=None):
oldMF.dump_flags(self, verbose)
self.with_solvent.check_sanity()
self.with_solvent.dump_flags(verbose)
return self
# Note vpcm should not be added to get_hcore for scf methods.
# get_hcore is overloaded by many post-HF methods. Modifying
# SCF.get_hcore may lead error.
def get_veff(self, mol=None, dm=None, *args, **kwargs):
vhf = oldMF.get_veff(self, mol, dm, *args, **kwargs)
with_solvent = self.with_solvent
if not with_solvent.frozen:
with_solvent.epcm, with_solvent.vpcm = with_solvent.kernel(dm)
epcm, vpcm = with_solvent.epcm, with_solvent.vpcm
# NOTE: vpcm should not be added to vhf in this place. This is
# because vhf is used as the reference for direct_scf in the next
# iteration. If vpcm is added here, it may break direct SCF.
return lib.tag_array(vhf, epcm=epcm, vpcm=vpcm)
def get_fock(self, h1e=None, s1e=None, vhf=None, dm=None, cycle=-1,
diis=None, diis_start_cycle=None,
level_shift_factor=None, damp_factor=None):
# DIIS was called inside oldMF.get_fock. vpcm, as a function of
# dm, should be extrapolated as well. To enable it, vpcm has to be
# added to the fock matrix before DIIS was called.
if getattr(vhf, 'vpcm', None) is None:
vhf = self.get_veff(self.mol, dm)
return oldMF.get_fock(self, h1e, s1e, vhf+vhf.vpcm, dm, cycle, diis,
diis_start_cycle, level_shift_factor, damp_factor)
def energy_elec(self, dm=None, h1e=None, vhf=None):
if dm is None:
dm = self.make_rdm1()
if getattr(vhf, 'epcm', None) is None:
vhf = self.get_veff(self.mol, dm)
e_tot, e_coul = oldMF.energy_elec(self, dm, h1e, vhf)
e_tot += vhf.epcm
logger.debug(self, ' E_diel = %.15g', vhf.epcm)
return e_tot, e_coul
def nuc_grad_method(self):
from pyscf.solvent import ddcosmo_grad
grad_method = oldMF.nuc_grad_method(self)
return ddcosmo_grad.ddcosmo_grad(grad_method, self.with_solvent)
mf1 = SCFWithSolvent(mf, solvent_obj)
return mf1
def ddcosmo_for_casscf(mc, solvent_obj=None, dm=None):
'''Patch ddCOSMO to CASSCF method.
Kwargs:
        dm : if given, solvent does not respond to the change of density
matrix. A frozen ddCOSMO potential is added to the results.
'''
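    # Usage sketch (illustrative; assumes an existing mean-field object ``mf``
    # and a hypothetical (6, 6) active space):
    #     mc = ddcosmo_for_casscf(mcscf.CASSCF(mf, 6, 6))
    #     mc.kernel()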
if getattr(mc, 'with_solvent', None):
if solvent_obj is not None:
mc.with_solvent = solvent_obj
return mc
oldCAS = mc.__class__
if solvent_obj is None:
if getattr(mc._scf, 'with_solvent', None):
solvent_obj = mc._scf.with_solvent
else:
solvent_obj = DDCOSMO(mc.mol)
if dm is not None:
solvent_obj.epcm, solvent_obj.vpcm = solvent_obj.kernel(dm)
solvent_obj.frozen = True
class CASSCFWithSolvent(oldCAS):
def __init__(self, mc, solvent):
self.__dict__.update(mc.__dict__)
self.with_solvent = solvent
self._e_tot_without_solvent = 0
self._keys.update(['with_solvent'])
def dump_flags(self, verbose=None):
oldCAS.dump_flags(self, verbose)
self.with_solvent.check_sanity()
self.with_solvent.dump_flags(verbose)
if self.conv_tol < 1e-7:
logger.warn(self, 'CASSCF+ddCOSMO may not be able to '
'converge to conv_tol=%g', self.conv_tol)
return self
def update_casdm(self, mo, u, fcivec, e_ci, eris, envs={}):
casdm1, casdm2, gci, fcivec = \
oldCAS.update_casdm(self, mo, u, fcivec, e_ci, eris, envs)
# The potential is generated based on the density of current micro iteration.
# It will be added to hcore in casci function. Strictly speaking, this density
            # is not the same as the CASSCF density (which was used to measure
# convergence) in the macro iterations. When CASSCF is converged, it
# should be almost the same to the CASSCF density of the macro iterations.
with_solvent = self.with_solvent
if not with_solvent.frozen:
# Code to mimic dm = self.make_rdm1(ci=fcivec)
mocore = mo[:,:self.ncore]
mocas = mo[:,self.ncore:self.ncore+self.ncas]
dm = reduce(numpy.dot, (mocas, casdm1, mocas.T))
dm += numpy.dot(mocore, mocore.T) * 2
with_solvent.epcm, with_solvent.vpcm = with_solvent.kernel(dm)
return casdm1, casdm2, gci, fcivec
# ddCOSMO Potential should be added to the effective potential. However, there
# is no hook to modify the effective potential in CASSCF. The workaround
# here is to modify hcore. It can affect the 1-electron operator in many CASSCF
# functions: gen_h_op, update_casdm, casci. Note hcore is used to compute the
# energy for core density (Ecore). The resultant total energy from casci
# function will include the contribution from ddCOSMO potential. The
# duplicated energy contribution from solvent needs to be removed.
def get_hcore(self, mol=None):
hcore = self._scf.get_hcore(mol)
if self.with_solvent.vpcm is not None:
hcore += self.with_solvent.vpcm
return hcore
def casci(self, mo_coeff, ci0=None, eris=None, verbose=None, envs=None):
log = logger.new_logger(self, verbose)
log.debug('Running CASCI with solvent. Note the total energy '
'has duplicated contributions from solvent.')
# In oldCAS.casci function, dE was computed based on the total
# energy without removing the duplicated solvent contributions.
# However, envs['elast'] is the last total energy with correct
# solvent effects. Hack envs['elast'] to make oldCAS.casci print
# the correct energy difference.
envs['elast'] = self._e_tot_without_solvent
e_tot, e_cas, fcivec = oldCAS.casci(self, mo_coeff, ci0, eris,
verbose, envs)
self._e_tot_without_solvent = e_tot
log.debug('Computing corrections to the total energy.')
dm = self.make_rdm1(ci=fcivec, ao_repr=True)
with_solvent = self.with_solvent
if with_solvent.epcm is not None:
edup = numpy.einsum('ij,ji->', with_solvent.vpcm, dm)
ediel = with_solvent.epcm
e_tot = e_tot - edup + ediel
log.info('Removing duplication %.15g, '
'adding E_diel = %.15g to total energy:\n'
' E(CASSCF+solvent) = %.15g', edup, ediel, e_tot)
# Update solvent effects for next iteration if needed
if not with_solvent.frozen:
with_solvent.epcm, with_solvent.vpcm = with_solvent.kernel(dm)
return e_tot, e_cas, fcivec
def nuc_grad_method(self):
from pyscf.solvent import ddcosmo_grad
grad_method = oldCAS.nuc_grad_method(self)
return ddcosmo_grad.ddcosmo_grad(grad_method, self.with_solvent)
return CASSCFWithSolvent(mc, solvent_obj)
def ddcosmo_for_casci(mc, solvent_obj=None, dm=None):
'''Patch ddCOSMO to CASCI method.
Kwargs:
        dm : if given, solvent does not respond to the change of density
matrix. A frozen ddCOSMO potential is added to the results.
'''
if getattr(mc, 'with_solvent', None):
if solvent_obj is not None:
mc.with_solvent = solvent_obj
return mc
oldCAS = mc.__class__
if solvent_obj is None:
if getattr(mc._scf, 'with_solvent', None):
solvent_obj = mc._scf.with_solvent
else:
solvent_obj = DDCOSMO(mc.mol)
if dm is not None:
solvent_obj.epcm, solvent_obj.vpcm = solvent_obj.kernel(dm)
solvent_obj.frozen = True
class CASCIWithSolvent(oldCAS):
def __init__(self, mc, solvent):
self.__dict__.update(mc.__dict__)
self.with_solvent = solvent
self._keys.update(['with_solvent'])
def dump_flags(self, verbose=None):
oldCAS.dump_flags(self, verbose)
self.with_solvent.check_sanity()
self.with_solvent.dump_flags(verbose)
return self
def get_hcore(self, mol=None):
hcore = self._scf.get_hcore(mol)
if self.with_solvent.vpcm is not None:
# NOTE: get_hcore was called by CASCI to generate core
                # potential. vpcm is added in this place to take into account the
# effects of solvent. Its contribution is duplicated and it
# should be removed from the total energy.
hcore += self.with_solvent.vpcm
return hcore
def kernel(self, mo_coeff=None, ci0=None, verbose=None):
with_solvent = self.with_solvent
log = logger.new_logger(self)
log.info('\n** Self-consistently update the solvent effects for %s **',
oldCAS)
log1 = copy.copy(log)
log1.verbose -= 1 # Suppress a few output messages
def casci_iter_(ci0, log):
# self.e_tot, self.e_cas, and self.ci are updated in the call
# to oldCAS.kernel
e_tot, e_cas, ci0 = oldCAS.kernel(self, mo_coeff, ci0, log)[:3]
if isinstance(self.e_cas, (float, numpy.number)):
dm = self.make_rdm1(ci=ci0)
else:
log.debug('Computing solvent responses to DM of state %d',
with_solvent.state_id)
dm = self.make_rdm1(ci=ci0[with_solvent.state_id])
if with_solvent.epcm is not None:
edup = numpy.einsum('ij,ji->', with_solvent.vpcm, dm)
self.e_tot += with_solvent.epcm - edup
if not with_solvent.frozen:
with_solvent.epcm, with_solvent.vpcm = with_solvent.kernel(dm)
log.debug(' E_diel = %.15g', with_solvent.epcm)
return self.e_tot, e_cas, ci0
if with_solvent.frozen:
with lib.temporary_env(self, _finalize=lambda:None):
casci_iter_(ci0, log)
log.note('Total energy with solvent effects')
self._finalize()
return self.e_tot, self.e_cas, self.ci, self.mo_coeff, self.mo_energy
self.converged = False
with lib.temporary_env(self, canonicalization=False):
e_tot = e_last = 0
for cycle in range(self.with_solvent.max_cycle):
log.info('\n** Solvent self-consistent cycle %d:', cycle)
e_tot, e_cas, ci0 = casci_iter_(ci0, log1)
de = e_tot - e_last
if isinstance(e_cas, (float, numpy.number)):
                        log.info('Solvent cycle %d E(CASCI+solvent) = %.15g '
'dE = %g', cycle, e_tot, de)
else:
for i, e in enumerate(e_tot):
log.info('Solvent cycle %d CASCI root %d '
'E(CASCI+solvent) = %.15g dE = %g',
cycle, i, e, de[i])
if abs(e_tot-e_last).max() < with_solvent.conv_tol:
self.converged = True
break
e_last = e_tot
# An extra cycle to canonicalize CASCI orbitals
with lib.temporary_env(self, _finalize=lambda:None):
casci_iter_(ci0, log)
if self.converged:
log.info('self-consistent CASCI+solvent converged')
else:
log.info('self-consistent CASCI+solvent not converged')
log.note('Total energy with solvent effects')
self._finalize()
return self.e_tot, self.e_cas, self.ci, self.mo_coeff, self.mo_energy
def nuc_grad_method(self):
from pyscf.solvent import ddcosmo_grad
grad_method = oldCAS.nuc_grad_method(self)
return ddcosmo_grad.ddcosmo_grad(grad_method, self.with_solvent)
return CASCIWithSolvent(mc, solvent_obj)
def ddcosmo_for_post_scf(method, solvent_obj=None, dm=None):
'''Default wrapper to patch ddCOSMO to post-SCF methods (CC, CI, MP,
TDDFT etc.)
NOTE: this implementation often causes (macro iteration) convergence issue
Kwargs:
        dm : if given, solvent does not respond to the change of density
matrix. A frozen ddCOSMO potential is added to the results.
'''
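    # Usage sketch (illustrative; assumes an existing mean-field object ``mf``):
    #     from pyscf import cc
    #     mycc = ddcosmo_for_post_scf(cc.CCSD(mf))
    #     mycc.kernel()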
if getattr(method, 'with_solvent', None):
if solvent_obj is not None:
method.with_solvent = solvent_obj
method._scf.with_solvent = solvent_obj
return method
old_method = method.__class__
if getattr(method._scf, 'with_solvent', None):
scf_with_solvent = method._scf
if solvent_obj is not None:
scf_with_solvent.with_solvent = solvent_obj
else:
scf_with_solvent = ddcosmo_for_scf(method._scf, solvent_obj, dm)
if dm is None:
solvent_obj = scf_with_solvent.with_solvent
solvent_obj.epcm, solvent_obj.vpcm = \
solvent_obj.kernel(scf_with_solvent.make_rdm1())
# Post-HF objects access the solvent effects indirectly through the
# underlying ._scf object.
basic_scanner = method.as_scanner()
basic_scanner._scf = scf_with_solvent.as_scanner()
if dm is not None:
solvent_obj = scf_with_solvent.with_solvent
solvent_obj.epcm, solvent_obj.vpcm = solvent_obj.kernel(dm)
solvent_obj.frozen = True
class PostSCFWithSolvent(old_method):
def __init__(self, method):
self.__dict__.update(method.__dict__)
self._scf = scf_with_solvent
@property
def with_solvent(self):
return self._scf.with_solvent
def dump_flags(self, verbose=None):
old_method.dump_flags(self, verbose)
self.with_solvent.check_sanity()
self.with_solvent.dump_flags(verbose)
return self
def kernel(self, *args, **kwargs):
with_solvent = self.with_solvent
# The underlying ._scf object is decorated with solvent effects.
# The resultant Fock matrix and orbital energies both include the
# effects from solvent. It means that solvent effects for post-HF
# methods are automatically counted if solvent is enabled at scf
# level.
if with_solvent.frozen:
return old_method.kernel(self, *args, **kwargs)
log = logger.new_logger(self)
log.info('\n** Self-consistently update the solvent effects for %s **',
old_method)
##TODO: Suppress a few output messages
#log1 = copy.copy(log)
#log1.note, log1.info = log1.info, log1.debug
e_last = 0
#diis = lib.diis.DIIS()
for cycle in range(self.with_solvent.max_cycle):
log.info('\n** Solvent self-consistent cycle %d:', cycle)
# Solvent effects are applied when accessing the
# underlying ._scf objects. The flag frozen=True ensures that
# the generated potential with_solvent.vpcm is passed to the
# the post-HF object, without being updated in the implicit
# call to the _scf iterations.
with lib.temporary_env(with_solvent, frozen=True):
e_tot = basic_scanner(self.mol)
dm = basic_scanner.make_rdm1(ao_repr=True)
#dm = diis.update(dm)
# To generate the solvent potential for ._scf object. Since
# frozen is set when calling basic_scanner, the solvent
# effects are frozen during the scf iterations.
with_solvent.epcm, with_solvent.vpcm = with_solvent.kernel(dm)
de = e_tot - e_last
                log.info('Solvent cycle %d E_tot = %.15g dE = %g',
cycle, e_tot, de)
if abs(e_tot-e_last).max() < with_solvent.conv_tol:
break
e_last = e_tot
# An extra cycle to compute the total energy
log.info('\n** Extra cycle for solvent effects')
with lib.temporary_env(with_solvent, frozen=True):
#Update everything except the _scf object and _keys
basic_scanner(self.mol)
self.__dict__.update(basic_scanner.__dict__)
self._scf.__dict__.update(basic_scanner._scf.__dict__)
self._finalize()
return self.e_corr, None
def nuc_grad_method(self):
from pyscf.solvent import ddcosmo_grad
grad_method = old_method.nuc_grad_method(self)
return ddcosmo_grad.ddcosmo_grad(grad_method, self.with_solvent)
return PostSCFWithSolvent(method)
# Inject DDCOSMO into other methods
from pyscf import scf
from pyscf import mcscf
from pyscf import mp, ci, cc
scf.hf.SCF.DDCOSMO = ddcosmo_for_scf
mcscf.casci.DDCOSMO = ddcosmo_for_casci
mcscf.mc1step.DDCOSMO = ddcosmo_for_casscf
mp.mp2.MP2.DDCOSMO = ddcosmo_for_post_scf
ci.cisd.CISD.DDCOSMO = ddcosmo_for_post_scf
cc.ccsd.CCSD.DDCOSMO = ddcosmo_for_post_scf
# TODO: Testing the value of psi (make_psi_vmat). All intermediates except
# psi are tested against ddPCM implementation on github. Psi needs to be
# computed by the host program. It requires the numerical integration code.
def gen_ddcosmo_solver(pcmobj, verbose=None):
'''Generate ddcosmo function to compute energy and potential matrix
'''
mol = pcmobj.mol
if pcmobj.grids.coords is None:
pcmobj.grids.build(with_non0tab=True)
natm = mol.natm
lmax = pcmobj.lmax
r_vdw = pcmobj.get_atomic_radii()
coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order)
ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))
fi = make_fi(pcmobj, r_vdw)
ui = 1 - fi
ui[ui<0] = 0
nexposed = numpy.count_nonzero(ui==1)
nbury = numpy.count_nonzero(ui==0)
on_shell = numpy.count_nonzero(ui>0) - nexposed
logger.debug(pcmobj, 'Num points exposed %d', nexposed)
logger.debug(pcmobj, 'Num points buried %d', nbury)
logger.debug(pcmobj, 'Num points on shell %d', on_shell)
nlm = (lmax+1)**2
Lmat = make_L(pcmobj, r_vdw, ylm_1sph, fi)
Lmat = Lmat.reshape(natm*nlm,-1)
cached_pol = cache_fake_multipoles(pcmobj.grids, r_vdw, lmax)
def gen_vind(dm):
pcmobj._dm = dm
if not (isinstance(dm, numpy.ndarray) and dm.ndim == 2):
# spin-traced DM for UHF or ROHF
dm = dm[0] + dm[1]
phi = make_phi(pcmobj, dm, r_vdw, ui)
L_X = numpy.linalg.solve(Lmat, phi.ravel()).reshape(natm,-1)
psi, vmat = make_psi_vmat(pcmobj, dm, r_vdw, ui, pcmobj.grids, ylm_1sph,
cached_pol, L_X, Lmat)[:2]
dielectric = pcmobj.eps
if dielectric > 0:
f_epsilon = (dielectric-1.)/dielectric
else:
f_epsilon = 1
pcmobj.epcm = .5 * f_epsilon * numpy.einsum('jx,jx', psi, L_X)
pcmobj.vpcm = .5 * f_epsilon * vmat
return pcmobj.epcm, pcmobj.vpcm
return gen_vind
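# Usage sketch (illustrative): build the solver once for a given DDCOSMO object,
# then evaluate it for a density matrix to obtain the dielectric energy and the
# solvent potential matrix:
#     solver = gen_ddcosmo_solver(pcmobj)
#     epcm, vpcm = solver(dm)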
def energy(pcmobj, dm):
'''
ddCOSMO energy
Es = 1/2 f(eps) \int rho(r) W(r) dr
'''
epcm = gen_ddcosmo_solver(pcmobj, pcmobj.verbose)(dm)[0]
return epcm
def get_atomic_radii(pcmobj):
mol = pcmobj.mol
vdw_radii = pcmobj.radii_table
atom_radii = pcmobj.atom_radii
atom_symb = [mol.atom_symbol(i) for i in range(mol.natm)]
r_vdw = [vdw_radii[gto.charge(x)] for x in atom_symb]
if atom_radii is not None:
for i in range(mol.natm):
if atom_symb[i] in atom_radii:
r_vdw[i] = atom_radii[atom_symb[i]]
return numpy.asarray(r_vdw)
def regularize_xt(t, eta):
xt = numpy.zeros_like(t)
inner = t <= 1-eta
on_shell = (1-eta < t) & (t < 1)
xt[inner] = 1
ti = t[on_shell]
# JCTC, 9, 3637
xt[on_shell] = 1./eta**5 * (1-ti)**3 * (6*ti**2 + (15*eta-12)*ti
+ 10*eta**2 - 15*eta + 6)
# JCP, 139, 054111
# xt[on_shell] = 1./eta**4 * (1-ti)**2 * (ti-1+2*eta)**2
return xt
def make_grids_one_sphere(lebedev_order):
ngrid_1sph = gen_grid.LEBEDEV_ORDER[lebedev_order]
leb_grid = numpy.empty((ngrid_1sph,4))
gen_grid.libdft.MakeAngularGrid(leb_grid.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(ngrid_1sph))
coords_1sph = leb_grid[:,:3]
# Note the Lebedev angular grids are normalized to 1 in pyscf
weights_1sph = 4*numpy.pi * leb_grid[:,3]
return coords_1sph, weights_1sph
def make_L(pcmobj, r_vdw, ylm_1sph, fi):
# See JCTC, 9, 3637, Eq (18)
mol = pcmobj.mol
natm = mol.natm
lmax = pcmobj.lmax
eta = pcmobj.eta
nlm = (lmax+1)**2
coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order)
ngrid_1sph = weights_1sph.size
atom_coords = mol.atom_coords()
ylm_1sph = ylm_1sph.reshape(nlm,ngrid_1sph)
# JCP, 141, 184108 Eq (9), (12) is incorrect
# L_diag = <lm|(1/|s-s'|)|l'm'>
# Using Laplace expansion for electrostatic potential 1/r
# L_diag = 4pi/(2l+1)/|s| <lm|l'm'>
L_diag = numpy.zeros((natm,nlm))
p1 = 0
for l in range(lmax+1):
p0, p1 = p1, p1 + (l*2+1)
L_diag[:,p0:p1] = 4*numpy.pi/(l*2+1)
L_diag *= 1./r_vdw.reshape(-1,1)
Lmat = numpy.diag(L_diag.ravel()).reshape(natm,nlm,natm,nlm)
for ja in range(natm):
# scale the weight, precontract d_nj and w_n
# see JCTC 9, 3637, Eq (16) - (18)
# Note all values are scaled by 1/r_vdw to make the formulas
# consistent to Psi in JCP, 141, 184108
part_weights = weights_1sph.copy()
part_weights[fi[ja]>1] /= fi[ja,fi[ja]>1]
for ka in atoms_with_vdw_overlap(ja, atom_coords, r_vdw):
vjk = r_vdw[ja] * coords_1sph + atom_coords[ja] - atom_coords[ka]
tjk = lib.norm(vjk, axis=1) / r_vdw[ka]
wjk = pcmobj.regularize_xt(tjk, eta, r_vdw[ka])
wjk *= part_weights
pol = sph.multipoles(vjk, lmax)
p1 = 0
for l in range(lmax+1):
fac = 4*numpy.pi/(l*2+1) / r_vdw[ka]**(l+1)
p0, p1 = p1, p1 + (l*2+1)
a = numpy.einsum('xn,n,mn->xm', ylm_1sph, wjk, pol[l])
Lmat[ja,:,ka,p0:p1] += -fac * a
return Lmat
def make_fi(pcmobj, r_vdw):
coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order)
mol = pcmobj.mol
eta = pcmobj.eta
natm = mol.natm
atom_coords = mol.atom_coords()
ngrid_1sph = coords_1sph.shape[0]
fi = numpy.zeros((natm,ngrid_1sph))
for ia in range(natm):
for ja in atoms_with_vdw_overlap(ia, atom_coords, r_vdw):
v = r_vdw[ia]*coords_1sph + atom_coords[ia] - atom_coords[ja]
rv = lib.norm(v, axis=1)
t = rv / r_vdw[ja]
xt = pcmobj.regularize_xt(t, eta, r_vdw[ja])
fi[ia] += xt
fi[fi < 1e-20] = 0
return fi
def make_phi(pcmobj, dm, r_vdw, ui):
mol = pcmobj.mol
natm = mol.natm
coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order)
ngrid_1sph = coords_1sph.shape[0]
if not (isinstance(dm, numpy.ndarray) and dm.ndim == 2):
dm = dm[0] + dm[1]
tril_dm = lib.pack_tril(dm+dm.T)
nao = dm.shape[0]
    diagidx = numpy.arange(nao)
"""WFSC Loop Function."""
import numpy as np
import os
import time
import pickle
import matplotlib.pyplot as plt
import falco
def loop(mp, out):
"""
Loop over the estimator and controller for WFSC.
Parameters
----------
mp : falco.config.ModelParameters
Structure of model parameters
out : falco.config.Object
Output variables
Returns
-------
None
Outputs are included in the objects mp and out.
"""
if type(mp) is not falco.config.ModelParameters:
raise TypeError('Input "mp" must be of type ModelParameters')
pass
    # Measured, mean raw contrast in the scoring region of the dark hole.
InormHist = np.zeros((mp.Nitr+1,))
# Take initial broadband image
Im = falco.imaging.get_summed_image(mp)
###########################################################################
# Begin the Correction Iterations
###########################################################################
mp.flagCullActHist = np.zeros((mp.Nitr+1,), dtype=bool)
mp.thput_vec = np.zeros(mp.Nitr+1)
for Itr in range(mp.Nitr):
# Start of new estimation+control iteration
print('Iteration: %d / %d\n' % (Itr, mp.Nitr-1), end='')
out.serialDate[Itr] = time.time()
# Re-normalize PSF after latest DM commands
falco.imaging.calc_psf_norm_factor(mp)
# Updated DM data
# Change the selected DMs if using the scheduled EFC controller
if mp.controller.lower() in ['plannedefc']:
mp.dm_ind = mp.dm_ind_sched[Itr]
# Report which DMs are used in this iteration
print('DMs to be used in this iteration = [', end='')
for jj in range(len(mp.dm_ind)):
print(' %d' % (mp.dm_ind[jj]), end='')
print(' ]')
# Fill in History of DM commands to Store
if hasattr(mp, 'dm1'):
if hasattr(mp.dm1, 'V'):
out.dm1.Vall[:, :, Itr] = mp.dm1.V
if hasattr(mp, 'dm2'):
if hasattr(mp.dm2, 'V'):
out.dm2.Vall[:, :, Itr] = mp.dm2.V
if hasattr(mp, 'dm5'):
if hasattr(mp.dm5, 'V'):
out.dm5.Vall[:, :, Itr] = mp.dm5.V
if hasattr(mp, 'dm8'):
if hasattr(mp.dm8, 'V'):
out.dm8.Vall[:, Itr] = mp.dm8.V[:]
if hasattr(mp, 'dm9'):
if hasattr(mp.dm9, 'V'):
out.dm9.Vall[:, Itr] = mp.dm9.V[:]
# Compute the DM surfaces
if np.any(mp.dm_ind == 1):
DM1surf = falco.dm.gen_surf_from_act(mp.dm1, mp.dm1.compact.dx, mp.dm1.compact.Ndm)
else:
DM1surf = np.zeros((mp.dm1.compact.Ndm, mp.dm1.compact.Ndm))
if np.any(mp.dm_ind == 2):
DM2surf = falco.dm.gen_surf_from_act(mp.dm2, mp.dm2.compact.dx, mp.dm2.compact.Ndm)
else:
DM2surf = np.zeros((mp.dm2.compact.Ndm, mp.dm2.compact.Ndm))
# Updated plot and reporting
# Calculate the core throughput (at higher resolution to be more accurate)
thput, ImSimOffaxis = falco.imaging.calc_thput(mp)
if mp.flagFiber:
mp.thput_vec[Itr] = np.max(thput)
else:
mp.thput_vec[Itr] = thput # record keeping
# Compute the current contrast level
InormHist[Itr] = np.mean(Im[mp.Fend.corr.maskBool])
if(any(mp.dm_ind == 1)):
mp.dm1 = falco.dm.enforce_constraints(mp.dm1)
if(any(mp.dm_ind == 2)):
mp.dm2 = falco.dm.enforce_constraints(mp.dm2)
# Plotting
if(mp.flagPlot):
if Itr == 0:
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
# else:
# ax1.clear()
# ax2.clear()
# ax3.clear()
# ax4.clear()
fig.subplots_adjust(hspace=0.4, wspace=0.0)
fig.suptitle(mp.coro+': Iteration %d' % Itr)
im1 = ax1.imshow(np.log10(Im), cmap='magma', interpolation='none',
                             extent=[np.min(mp.Fend.xisDL), np.max(mp.Fend.xisDL)