import itertools
import math
import random
from time import time
import numpy as np
import pandas as pd
import torch
from torch.nn import functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
EPS = 1e-12
class Trainer:
def __init__(self, model, optimizers, dataset=None, device=None, recon_type=None, z_capacity=None,
u_capacity=None, c_gamma=None, entropy_gamma=None, bc_gamma=None, bc_threshold=None):
self.dataset = dataset
self.device = device
self.model = model.to(self.device)
self.optimizer_warm_up, self.optimizer_model = optimizers
if dataset == 'dsprites':
self.scheduler_warm_up = ReduceLROnPlateau(self.optimizer_warm_up, factor=0.5, patience=1, threshold=1e-1,
threshold_mode='rel', cooldown=2, min_lr=0, eps=1e-06,
verbose=True)
self.scheduler_model = ReduceLROnPlateau(self.optimizer_model, factor=0.5, patience=1, threshold=1e-2,
threshold_mode='rel', cooldown=4, min_lr=0, eps=1e-07,
verbose=True)
else:
self.scheduler_warm_up = ReduceLROnPlateau(self.optimizer_warm_up, factor=0.5, patience=2, threshold=1e-1,
threshold_mode='rel', cooldown=3, min_lr=0, eps=1e-06,
verbose=True)
self.scheduler_model = ReduceLROnPlateau(self.optimizer_model, factor=0.5, patience=2, threshold=1e-2,
threshold_mode='rel', cooldown=4, min_lr=0, eps=1e-07,
verbose=True)
self.recon_type = recon_type
self.z_capacity = z_capacity
self.u_capacity = u_capacity
self.c_gamma = c_gamma
self.entropy_gamma = entropy_gamma
self.bc_gamma = bc_gamma
self.bc_threshold = bc_threshold
# Used when computing the KLD; computed once here for speed
self.u_kl_func_valid_indices = torch.zeros(self.model.sum_c_dims, dtype=torch.long, device=self.device, requires_grad=False)
start = 0
for value, disc_dim in enumerate(self.model.c_dims):
self.u_kl_func_valid_indices[start:start + disc_dim] = value
start += disc_dim
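# e.g. with c_dims = [3, 2] this yields [0, 0, 0, 1, 1]: each one-hot
# slot is tagged with the index of the categorical variable it belongs to.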
# Used in computing KLs and Entropy for each random variable
self.unwrap_mask = torch.zeros(self.model.sum_c_dims, self.model.sum_c_dims, self.model.c_count, device=self.device, requires_grad=False)
start = 0
for dim_idx, size in enumerate(self.model.c_dims):
self.unwrap_mask[torch.arange(start, start + size), torch.arange(start, start + size), dim_idx] = 1
start += size
# Used in computing BC
self.u_valid_prior_BC_mask = torch.zeros(self.model.sum_c_dims, self.model.sum_c_dims, device=self.device, requires_grad=False)
start = 0
for dim_idx, size in enumerate(self.model.c_dims):
indices = itertools.product(range(start, start + size), range(start, start + size))
self.u_valid_prior_BC_mask[list(zip(*indices))] = 1
start += size
self.u_valid_prior_BC_mask.tril_(diagonal=-1)
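# e.g. with c_dims = [3, 2] the mask keeps the strictly-lower-triangular
# pairs inside each 3x3 and 2x2 diagonal block, i.e. every unordered pair
# of distinct one-hot slots belonging to the same categorical variable.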
self.num_steps = 0
self.batch_size = None
def train(self, data_loader, warm_up_loader=None, epochs=10, run_after_epoch=None, run_after_epoch_args=None):
self.batch_size = data_loader.batch_size
self.model.train()
if warm_up_loader is None:
print('No warm-up')
for epoch in range(epochs):
t = time()
warm_up_mean_loss, separated_mean_epoch_loss = self._train_epoch(data_loader, warm_up_loader)
others, kl_z_dims, df = [
list(separated_mean_epoch_loss[:12]),
list(np.round(separated_mean_epoch_loss[12:12+self.model.z_dim], 2)),
pd.DataFrame((100 * separated_mean_epoch_loss[-3 * self.model.c_count:]).round(2).reshape(3, -1).transpose(), columns=['100 * E[KL(U_{C_i})]', '100 * KL(C_i)', '100 * H(q(c_i|x))'])
]
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', None):
print("""
Epoch: %d\t MLoss: %.3f \tWarmUpLoss: %.7f\t\tTime: %.2f'
%.3f \t+ %.5f\t+ %.5f\t+ %.5f\t+ %.5f\t+ %.5f
(Recon) \tz %.5f\tu %.5f\tc %.5f\th %.5f\tbc %.5f
z kls: \t%s
\t%s\n""" % (epoch + 1, others[0], warm_up_mean_loss, round((time() - t) / 60, 2), *others[1:], str(kl_z_dims), str(df).replace('\n', '\n\t ')), flush=True)
if warm_up_loader is not None:
self.scheduler_warm_up.step(warm_up_mean_loss)
self.scheduler_model.step(separated_mean_epoch_loss[0])
if run_after_epoch is not None:
run_after_epoch(epoch, *run_after_epoch_args)
def _train_epoch(self, data_loader, warm_up_loader=None):
warm_up_loss, separated_sum_loss = 0, 0
for batch_idx, (data, label) in enumerate(data_loader):
warm_up_loss += self._warm_up(warm_up_loader)
separated_sum_loss += self._train_iteration(data)
return warm_up_loss / (batch_idx + 1), separated_sum_loss / len(data_loader.dataset)
def _warm_up(self, loader):
if not loader:
return 0
epoch_loss = 0
for data, label in loader:
data, label = data.to(self.device), label.to(self.device)
self.optimizer_warm_up.zero_grad()
latent_dist = self.model.encode(data, only_disc_dist=True)
sum_ce = torch.sum(-1 * label * latent_dist['log_c'], dim=1)
loss = torch.mean(sum_ce)
loss.backward()
self.optimizer_warm_up.step()
epoch_loss += loss.item()
if random.random() < 0.001:
print('%.3f' % epoch_loss, end=', ', flush=False)
return epoch_loss
def _train_iteration(self, data):
self.num_steps += 1
data = data.to(self.device)
recon_batch, latent_dist = self.model(data)
loss, separated_mean_loss = self._loss_function(data, recon_batch, latent_dist)
if np.isnan(separated_mean_loss[0]):
from typing import Union, Tuple
import numpy as np
import pygame
from gym_graph_traffic.envs.intersection import Intersection
class Segment:
def __init__(self, idx: int, length: int, next_intersection: Intersection, to_side, car_density: float,
max_v: int, prob_slow_down: float, intersection_size: int, **kwargs):
self.idx = idx
# graph info
self.length = length
self.next_intersection = next_intersection
# cellular automata parameters
self.car_density = car_density
self.max_v = max_v
self.prob_slow_down = prob_slow_down
# cars positions and velocities
self.p: Union[None, np.ndarray] = None # position vector: 1 if there is a car, 0 otherwise
self.v: Union[None, np.ndarray] = None # velocity vector (of length equal to current number of cars on segment)
# communication with neighbour segments about cars that cross intersections
self.free_init_cells: int = 0
self.new_car_at: Union[None, Tuple[int, int]] = None
self.to_side = to_side
# render
self.road_width = intersection_size / 2
# initialize cars and free init cells
self.reset()
def __str__(self) -> str:
return str(self.idx)
def reset(self) -> None:
self.p = np.random.binomial(1, self.car_density, self.length)
self.v = np.zeros(self.p.nonzero()[0].shape, dtype=np.int8)
self._update_free_init_cells()
def draw(self, surface, light_mode):
(x, y, w, h) = self.next_intersection.segment_draw_coords(self.length, self.to_side)
road_color = (192, 192, 192) if light_mode else (100, 100, 100)
pygame.draw.rect(surface, road_color,
pygame.Rect(x, y, w, h))
dy = w == self.road_width
dx = h == self.road_width
for cx in np.nonzero(self.p)[0]:
cx = cx if self.to_side in "lu" else (self.length - 1 - cx)
car_color = (162, 162, 162) if light_mode else (180, 180, 180)
pygame.draw.rect(surface, car_color,
pygame.Rect((cx * dx + x), (cx * dy + y), self.road_width if dy else 1,
self.road_width if dx else 1))
def total_distance(self) -> int:
"""
:return: Cumulative distance covered by cars during last update.
"""
if self.v.size == 0:
return 0
else:
return int(np.sum(self.v))
def mean_velocity(self) -> float:
"""
:return: Mean velocity of all cars during last update.
"""
if self.v.size == 0:
return 0.0
else:
return float(np.mean(self.v))
# -*- coding: utf-8 -*-
'''
This module defines :class:`ChannelIndex`, a container for multiple
data channels.
:class:`ChannelIndex` derives from :class:`Container`,
from :module:`neo.core.container`.
'''
# needed for Python 3 compatibility
from __future__ import absolute_import, division, print_function
import numpy as np
import quantities as pq
from neo.core.container import Container
class ChannelIndex(Container):
'''
A container for indexing/grouping data channels.
This container has several purposes:
* Grouping all :class:`AnalogSignal`\s inside a :class:`Block`
across :class:`Segment`\s;
* Indexing a subset of the channels within an :class:`AnalogSignal`;
* Container of :class:`Unit`\s. A neuron discharge (:class:`Unit`)
can be seen by several electrodes (e.g. 4 for tetrodes).
*Usage 1* multi :class:`Segment` recording with 2 electrode arrays::
>>> from neo.core import (Block, Segment, ChannelIndex,
... AnalogSignal)
>>> from quantities import nA, kHz
>>> import numpy as np
>>>
>>> # create a Block with 3 Segment and 2 ChannelIndex objects
... blk = Block()
>>> for ind in range(3):
... seg = Segment(name='segment %d' % ind, index=ind)
... blk.segments.append(seg)
...
>>> for ind in range(2):
... chx = ChannelIndex(name='Array probe %d' % ind,
... index=np.arange(64))
... blk.channel_indexes.append(chx)
...
>>> # Populate the Block with AnalogSignal objects
... for seg in blk.segments:
... for chx in blk.channel_indexes:
... a = AnalogSignal(np.random.randn(10000, 64)*nA,
... sampling_rate=10*kHz)
... chx.analogsignals.append(a)
... seg.analogsignals.append(a)
*Usage 2* grouping channels::
>>> from neo.core import Block, Segment, ChannelIndex, AnalogSignal
>>> import numpy as np
>>> from quantities import mV, kHz
>>>
>>> # Create a Block
... blk = Block()
>>> blk.segments.append(Segment())
>>>
>>> # Create a signal with 8 channels
... sig = AnalogSignal(np.random.randn(1000, 8)*mV, sampling_rate=10*kHz)
... blk.segments[0].analogsignals.append(sig)
...
>>> # Create a new ChannelIndex which groups three channels from the signal
... chx = ChannelIndex(channel_names=np.array(['ch1', 'ch4', 'ch6']),
... index=np.array([0, 3, 5]))
>>> chx.analogsignals.append(sig)
>>> blk.channel_indexes.append(chx)
*Usage 3* dealing with :class:`Unit` objects::
>>> from neo.core import Block, ChannelIndex, Unit
>>>
>>> # Create a Block
>>> blk = Block()
>>>
>>> # Create a new ChannelIndex and add it to the Block
>>> chx = ChannelIndex(name='octotrode A')
>>> blk.channel_indexes.append(chx)
>>>
>>> # create several Unit objects and add them to the
>>> # ChannelIndex
... for ind in range(5):
... unit = Unit(name = 'unit %d' % ind,
... description='after a long and hard spike sorting')
... chx.units.append(unit)
*Required attributes/properties*:
:channel_indexes: (numpy.array 1D dtype='i')
Index of each channel in the attached signals.
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
:channel_names: (numpy.array 1D dtype='S')
Names for each recording channel.
:coordinates: (quantity array 2D (x, y, z))
Physical or logical coordinates of all channels.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Container of*:
:class:`AnalogSignal`
:class:`IrregularlySampledSignal`
:class:`Unit`
'''
_container_child_objects = ('Unit',)
_data_child_objects = ('AnalogSignal', 'IrregularlySampledSignal')
_single_parent_objects = ('Block',)
_necessary_attrs = (('index', np.ndarray, 1, np.dtype('i')),)
_recommended_attrs = ((('channel_names', np.ndarray, 1, np.dtype('S')),
('channel_ids', np.ndarray, 1, np.dtype('i')),
('coordinates', pq.Quantity, 2)) +
Container._recommended_attrs)
def __init__(self, index, channel_names=None, channel_ids=None,
name=None, description=None, file_origin=None,
coordinates=None, **annotations):
'''
Initialize a new :class:`ChannelIndex` instance.
'''
# Inherited initialization
# Sets universally recommended attributes, and places all others
# in annotations
super(ChannelIndex, self).__init__(name=name,
description=description,
file_origin=file_origin,
**annotations)
# Defaults
if channel_names is None:
channel_names = np.array([], dtype='S')
if channel_ids is None:
channel_ids = np.array([], dtype='i')
# Store recommended attributes
self.channel_names = np.array(channel_names)
self.channel_ids = np.array(channel_ids)
self.index = np.array(index)
self.coordinates = coordinates
def __getitem__(self, i):
'''
Get the item or slice :attr:`i`.
'''
index = self.index.__getitem__(i)
if self.channel_names.size > 0:
channel_names = self.channel_names[index]
if not channel_names.shape:
channel_names = [channel_names]
else:
channel_names = None
if self.channel_ids.size > 0:
channel_ids = self.channel_ids[index]
if not channel_ids.shape:
channel_ids = [channel_ids]
else:
channel_ids = None
obj = ChannelIndex(index=np.arange(index.size),
                   channel_names=channel_names,
                   channel_ids=channel_ids)
return obj
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation, writers
from scipy.integrate import solve_ivp
def ClassicalBJJEnsemble(Lambda, dE, size):
def f(t, v):
z, phi = v[slice(0, size)], v[slice(size, None)]
val = np.hstack(
[
-np.sqrt(1 - z ** 2) * np.sin(phi),
Lambda * z + z / np.sqrt(1 - z ** 2) * np.cos(phi) + dE,
]
)
return val
return f
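# Note: the returned RHS packs the state as [z_1..z_N, phi_1..phi_N], matching
# the np.hstack((z, phi)) initial condition handed to solve_ivp in solve() below.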
def solve(Lambda, dE, gridsize=1000, t_max=20, dt=0.1):
phi = np.linspace(-np.pi, np.pi, gridsize)
z = np.zeros_like(phi)
f = ClassicalBJJEnsemble(Lambda, dE, len(z))
t_max = np.pi * t_max / np.sqrt(1 + Lambda) # Normalization
dt = np.pi * dt / np.sqrt(1 + Lambda)
solution = solve_ivp(f, [0, t_max], np.hstack((z, phi)), max_step=dt)
z, phi = solution.y[slice(0, len(z))].T, solution.y[slice(len(z), None)].T
z = z / (2 / np.sqrt(Lambda)) # Normalization
t = solution.t
return z, phi, t
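# Illustrative driver (an assumption, not part of the original script): build
# the data and a scatter artist so that animate() below has `fig`, `scat`,
# `phi` and `z` in scope. The Lambda/dE values here are placeholders.
z, phi, t = solve(Lambda=25.0, dE=0.0)
fig, ax = plt.subplots()
scat = ax.scatter(phi[0], z[0], s=1)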
def animate(i):
scat.set_offsets(np.array([phi[i], z[i]]).T)
import os
import sys
import random
import numpy as np
import pandas as pd
# Please modify to fit your environment
import tensorflow as tf
import tensorflow.contrib.keras.api.keras as keras
from tensorflow.contrib.keras.api.keras import backend, callbacks
from tensorflow.contrib.keras.api.keras.models import Model
from tensorflow.contrib.keras.api.keras.layers import Input
from tensorflow.contrib.keras.api.keras.utils import Progbar
from tensorflow.contrib.keras.api.keras.optimizers import Adam
from functools import partial
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import macro as mc
import load_data as ld
import preprocess as pre
import models
import compute_relation_vectors as rv
if len(sys.argv) != 2:
print("input error: main.py method_flag")
print("method flag : nontransfer (=0), standard transfer learning (=1), count ver. all transfer deep learning (=2),\
mean ver. all transfer deep learning (=3), mean modified ver. all transfer deep learning (=4)")
sys.exit(1)
_, method_flag = sys.argv
def Neighbors( labels, database, knum ):
nbrs = NearestNeighbors(n_neighbors=knum, algorithm='ball_tree').fit(database)
dis, idx = nbrs.kneighbors(labels)
return dis, idx
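# Usage sketch (illustrative; shapes are assumptions): with `database` of shape
# (n_samples, n_features) and `labels` of shape (n_queries, n_features),
#   dis, idx = Neighbors(labels, database, knum=5)
# gives, for each query row i, the distances and indices of its 5 nearest
# database rows.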
def main(method_flag):
# load data
source_df, target_df = ld.load_file()
predicts, corrects = [], []
random.seed(123)
np.random.seed(123)
kf = KFold(shuffle=False, n_splits=mc._FOLD_NUM)  # random_state is ignored (and rejected by newer sklearn) when shuffle=False
fold_num = 1
cnt = 0
for train, test in kf.split(target_df):
print('{0}/{1}'.format(fold_num, mc._FOLD_NUM))
target_train = target_df.iloc[train]
target_test = target_df.iloc[test]
idx, labels = transfer_model(source_df, target_train, target_test, method_flag, fold_num)
predicts.extend(idx.tolist())
corrects.extend(labels[0].tolist())
fold_num = fold_num+1
# save results
predicts = np.array(predicts)
corrects = np.array(corrects)
err = []
for i in range(len(predicts)):
if predicts[i] == corrects[i]:
err.append(0)
else:
err.append(1)
test = np.concatenate((np.reshape(predicts,[len(predicts),1]),np.reshape(corrects,[len(corrects),1]),\
np.reshape(err,[len(err),1])), axis=1)
save_data = pd.DataFrame(test)
save_data.to_csv('%s'%(mc._RESULT_FILE),index=False,header=False)
#save_data.to_csv('../results/results.csv',index=False,header=False)
fp = open('%s'%(mc._RESULT_FILE),'a')
#fp = open('../results/results.csv','a')
fp.write('%f\n'%((1.0-np.mean(err))*100.0))
fp.close()
def transfer_model(source_df, target_df, test_df, method_flag, fold_num):
source_labels, source_data = np.split(np.array(source_df),[1],axis=1)
target_labels, target_data = np.split(np.array(target_df),[1],axis=1)
test_labels, test_data = np.split(np.array(test_df),[1],axis=1)
# normalization
#normalized_source_data = pre.normalize(source_data)
#normalized_target_data = pre.normalize(target_data)
#normalized_test_data = pre.normalize(test_data)
normalized_source_data = source_data
normalized_target_data = target_data
normalized_test_data = test_data
### construct model for source domain task ###
# optimization
opt = Adam()
# network setting
latent = models.latent(normalized_source_data.shape[1])
sll = models.source_last_layer()
tll = models.target_last_layer()
source_inputs = Input(shape=normalized_source_data.shape[1:])
latent_features = latent(source_inputs)
source_predictors = sll(latent_features)
latent.trainable = mc._SORUCE_LATENT_TRAIN
sll.trainable = True
source_nn = Model(inputs=[source_inputs], outputs=[source_predictors])
source_nn.compile(loss=['mean_squared_error'],optimizer=opt)
#source_nn.summary()
# training using source domain data
if method_flag != mc._SCRATCH:
source_max_loop = int(normalized_source_data.shape[0]/mc._BATCH_SIZE)
source_progbar = Progbar(target=mc._SOURCE_EPOCH_NUM)
for epoch in range(mc._SOURCE_EPOCH_NUM):
shuffle_data, shuffle_labels, _ = pre.paired_shuffle(normalized_source_data,source_labels,1)
for loop in range(source_max_loop):
batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
batch_train_labels = np.reshape(batch_train_labels, [len(batch_train_labels)])
one_hots = np.identity(mc._SOURCE_DIM_NUM)[np.array(batch_train_labels, dtype=np.int32)]
loss = source_nn.train_on_batch([batch_train_data],[one_hots])
#source_progbar.add(1, values=[("source loss",loss)])
# save
#latent.save('../results/source_latent.h5')
#sll.save('../results/source_last_layer.h5')
# compute relation vectors
if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
target_vectors = np.identity(mc._TARGET_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
elif method_flag == mc._COUNT_ATDL:
target_labels, relations = rv.compute_relation_labels(source_nn, normalized_target_data, target_labels, fold_num)
target_vectors = np.identity(mc._SOURCE_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
from memfuncs import MemFunc
import json
import matplotlib.pyplot as plt
import numpy as np
labels = ["Car_ID","Risk",'Value_Loss','Horsepower','City_MPG','Highway_MPG','Price']
def boxPlotForData():
data = np.genfromtxt("car_data.csv",delimiter=',')
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20, 10))
colors = ["lightblue","lightgreen","pink","lightgoldenrodyellow", 'lightskyblue','lightsalmon']
for i in range(6):
row, col = np.unravel_index(i,(3,2))
bplot = axes[row][col].boxplot(data[:,i+1],vert=True, notch=True,patch_artist=True)
bplot['boxes'][0].set_facecolor(colors[i])
axes[row][col].set_title(labels[i])
plt.title("Box Plots of Car Data")
plt.savefig("graphs/boxplotsCarData.png", bbox_inches='tight')
plt.show()
def histForData():
data = np.genfromtxt("car_data.csv",delimiter=',')
#plt.hist(data[:,1], facecolor='green')
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20, 10))
for i in range(6):
row, col = np.unravel_index(i,(3,2))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from skimage.transform import resize
from random import random
import _pickle
from scipy import misc
from scipy import io
from skimage import color
import glob
import gzip
import math
import numpy as np
import os
from scipy.io import loadmat as loadmat
from six.moves import urllib
from six.moves import xrange
import sys
import tarfile
import tensorflow as tf
FLAGS = tf.flags.FLAGS
def create_dir_if_needed(dest_directory):
"""
Create directory if it doesn't exist
:param dest_directory: path of the directory to create
:return: True if everything went well
"""
if not tf.gfile.IsDirectory(dest_directory):
tf.gfile.MakeDirs(dest_directory)
return True
def maybe_download(file_urls, directory):
"""
Download a set of files into a temporary local folder
:param file_urls: list of file URLs to fetch
:param directory: the directory where to download
:return: a tuple of filepaths corresponding to the files given as input
"""
# Create directory if doesn't exist
assert create_dir_if_needed(directory)
# This list will include all URLS of the local copy of downloaded files
result = []
# For each file of the dataset
for file_url in file_urls:
# Extract filename
filename = file_url.split('/')[-1]
# If downloading from GitHub, remove suffix ?raw=True from local filename
if filename.endswith("?raw=true"):
filename = filename[:-9]
# Deduce local file url
#filepath = os.path.join(directory, filename)
filepath = directory + '/' + filename
# Add to result list
result.append(filepath)
# Test if file already exists
if not tf.gfile.Exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(file_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return result
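# Usage sketch (URL and paths are illustrative):
#   local_paths = maybe_download(
#       ['http://example.com/data/train_32x32.mat'], '/tmp/dataset')
# Each returned path points at the local copy; files are fetched only if missing.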
def image_whitening(data):
"""
Subtracts the mean of each image and divides by its adjusted standard
deviation (for stability). Operations are per image but performed over the
entire array.
:param data: 4D array (ID, Height, Width, Channel)
:return: 4D array (ID, Height, Width, Channel)
"""
assert len(np.shape(data)) == 4
# Compute number of pixels in image
nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]
# Subtract mean
mean = np.mean(data, axis=(1,2,3))
ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
for i in xrange(len(data)):
data[i, :, :, :] -= mean[i] * ones
# Compute adjusted standard variance
adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels), np.std(data, axis=(1,2,3))) #NOLINT(long-line)
# Divide image
for i in xrange(len(data)):
data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]
print(np.shape(data))
return data
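# Minimal sketch of the per-image semantics (illustrative, assuming a random
# batch):
#   x = np.random.rand(4, 8, 8, 3).astype(np.float32)
#   y = image_whitening(x)
# Each y[i] then has mean ~0 and standard deviation ~1, up to the stability
# clamp above.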
def extract_svhn(local_url):
"""
Extract a MATLAB matrix into two numpy arrays with data and labels
:param local_url:
:return:
"""
with tf.gfile.Open(local_url, mode='rb') as file_obj:
# Load MATLAB matrix using scipy IO
dict = loadmat(file_obj)
# Extract each dictionary (one for data, one for labels)
data, labels = dict["X"], dict["y"]
# Set np type
data = np.asarray(data, dtype=np.float32)
labels = np.asarray(labels, dtype=np.int32)
# Transpose data to match TF model input format
data = data.transpose(3, 0, 1, 2)
# Fix the SVHN labels which label 0s as 10s
labels[labels == 10] = 0
# Fix label dimensions
labels = labels.reshape(len(labels))
return data, labels
def unpickle_cifar_dic(file):
"""
Helper function: unpickles a dictionary (used for loading CIFAR)
:param file: filename of the pickle
:return: tuple of (images, labels)
"""
fo = open(file, 'rb')
dict = _pickle.load(fo, encoding='latin1')
fo.close()
return dict['data'], dict['labels']
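# Usage sketch (illustrative path): for a standard CIFAR-10 batch file,
#   images, labels = unpickle_cifar_dic('cifar-10-batches-py/data_batch_1')
# `images` is a (10000, 3072) uint8 array and `labels` a list of 10000 ints.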
def extract_cifar10(local_url, data_dir):
"""
Extracts the CIFAR-10 dataset and return numpy arrays with the different sets
:param local_url: where the tar.gz archive is located locally
:param data_dir: where to extract the archive's file
:return: a tuple (train data, train labels, test data, test labels)
"""
# These numpy dumps can be reloaded to avoid performing the pre-processing
# if they exist in the working directory.
# Changing the order of this list will ruin the indices below.
preprocessed_files = ['/cifar10_train.npy',
'/cifar10_train_labels.npy',
'/cifar10_test.npy',
'/cifar10_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
with tf.gfile.Open(data_dir + preprocessed_files[0], mode='rb') as file_obj:
    train_data = np.load(file_obj)
with tf.gfile.Open(data_dir + preprocessed_files[1], mode='rb') as file_obj:
    train_labels = np.load(file_obj)
# Reload pre-processed testing data from numpy dumps
with tf.gfile.Open(data_dir + preprocessed_files[2], mode='rb') as file_obj:
    test_data = np.load(file_obj)
with tf.gfile.Open(data_dir + preprocessed_files[3], mode='rb') as file_obj:
    test_labels = np.load(file_obj)
else:
# Do everything from scratch
# Define lists of all files we should extract
train_files = ["data_batch_" + str(i) for i in xrange(1,6)]
test_file = ["test_batch"]
cifar10_files = train_files + test_file
# Check if all files have already been extracted
need_to_unpack = False
for file in cifar10_files:
if not tf.gfile.Exists(file):
need_to_unpack = True
break
# We have to unpack the archive
if need_to_unpack:
tarfile.open(local_url, 'r:gz').extractall(data_dir)
# Load training images and labels
images = []
labels = []
for file in train_files:
# Construct filename
filename = data_dir + "/cifar-10-batches-py/" + file
# Unpickle dictionary and extract images and labels
images_tmp, labels_tmp = unpickle_cifar_dic(filename)
# Append to lists
images.append(images_tmp)
labels.append(labels_tmp)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((50000,3,32,32))
train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(50000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Construct filename for test file
filename = data_dir + "/cifar-10-batches-py/" + test_file[0]
# Load test images and labels
test_data, test_labels_raw = unpickle_cifar_dic(filename)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_data,dtype=np.float32).reshape((10000,3,32,32))
test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_labels_raw, dtype=np.int32).reshape(10000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
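# Note on the reshape/swapaxes pairs above: CIFAR pixels are stored
# channel-major, so (N, 3072) -> (N, 3, 32, 32) -> swapaxes(1, 3) gives
# (N, 32, 32, 3), the NHWC layout the models expect (H and W end up
# transposed, which is harmless for square images).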
# Data from http://cswww.essex.ac.uk/mv/allfaces/faces95.html
def extract_faces(data_dir):
preprocessed_files = ['/faces_train.npy',
'/faces_train_labels.npy',
'/faces_test.npy',
'/faces_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
# if False:
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
# Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[2])
test_labels = np.load(data_dir + preprocessed_files[3])
else:
# Do everything from scratch
# Define lists of all files we should extract
train_files = glob.iglob("/home/Cooper/faces95/*")
# Load training images and labels
images = []
labels = []
test_images = []
test_labels = []
for label,person in enumerate(train_files):
pictures = list(glob.iglob(person+"/*"))
for picture in pictures[0:15]:
images.append(resize(misc.imread(picture),(30,30)))
labels.append(label)
for picture in pictures[15:20]:
test_images.append(resize(misc.imread(picture),(30,30)))
test_labels.append(label)
p = np.random.permutation(len(images))
images = [images[i] for i in p]
labels = [labels[i] for i in p]
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((72*15,3,30,30))
train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(72*15)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_images,dtype=np.float32).reshape((72*5,3,30,30))
test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_labels, dtype=np.int32).reshape(72*5)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
"""
Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
# if not os.path.exists(file):
if not tf.gfile.Exists(filename+".npy"):
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (pixel_depth / 2.0)) / pixel_depth
data = data.reshape(num_images, image_size, image_size, 1)
#np.save(filename, data)
return data
else:
with tf.gfile.Open(filename+".npy", mode='r') as file_obj:
return np.load(file_obj)
def extract_mnist_labels(filename, num_images):
"""
Extract the labels into a vector of int64 label IDs.
"""
# if not os.path.exists(file):
if not tf.gfile.Exists(filename+".npy"):
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)
#np.save(filename, labels)
return labels
else:
with tf.gfile.Open(filename+".npy", mode='r') as file_obj:
return np.load(file_obj)
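# Usage sketch (file names follow the MNIST convention used by ld_mnist below):
#   data = extract_mnist_data('train-images-idx3-ubyte.gz', 60000, 28, 255)
#   labels = extract_mnist_labels('train-labels-idx1-ubyte.gz', 60000)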
def extract_netflix(data_dir):
preprocessed_files = ['/netflix_train.npy',
'/netflix_train_labels.npy',
'/netflix_valid.npy',
'/netflix_valid_labels.npy',
'/netflix_test.npy',
'/netflix_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
#Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
#Reload pre-processed validation data from numpy dumps
valid_data = np.load(data_dir + preprocessed_files[2])
valid_labels = np.load(data_dir + preprocessed_files[3])
#Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[4])
test_labels = np.load(data_dir + preprocessed_files[5])
else:
#Do all pre-processing from scratch
train_file_path = "/home/logan_ford16/models/research/differential_privacy/multiple_teachers/DeepRecommender/Netflix/N3M_TRAIN/n3m.train.txt"
valid_file_path = "/home/logan_ford16/models/research/differential_privacy/multiple_teachers/DeepRecommender/Netflix/N3M_VALID/n3m.valid.txt"
test_file_path = "/home/logan_ford16/models/research/differential_privacy/multiple_teachers/DeepRecommender/Netflix/N3M_TEST/n3m.test.txt"
with open(train_file_path, 'r') as src:
for line in src.readlines():
parts = line.strip().split('\t')
if len(parts) < 3:
raise ValueError('Encountered badly formatted line in {}'.format(train_file_path))
def ld_svhn(extended=False, test_only=False):
"""
Load the original SVHN data
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extra Train, Test, and Extended Train data
if not test_only:
# Load and applying whitening to train data
train_data, train_labels = extract_svhn(local_urls[0])
train_data = image_whitening(train_data)
# Load and applying whitening to extended train data
ext_data, ext_labels = extract_svhn(local_urls[2])
ext_data = image_whitening(ext_data)
# Load and applying whitening to test data
test_data, test_labels = extract_svhn(local_urls[1])
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
if extended:
# Stack train data with the extended training data
train_data = np.vstack((train_data, ext_data))
train_labels = np.hstack((train_labels, ext_labels))
return train_data, train_labels, test_data, test_labels
else:
# Return training and extended training data separately
return train_data,train_labels, test_data,test_labels, ext_data,ext_labels
def ld_cifar10(test_only=False):
"""
Load the original CIFAR10 data
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
file_urls = ['https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract archives and return different sets
dataset = extract_cifar10(local_urls[0], FLAGS.data_dir)
# Unpack tuple
train_data, train_labels, test_data, test_labels = dataset
# Apply whitening to input data
train_data = image_whitening(train_data)
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def ld_mnist(test_only=False):
"""
Load the MNIST dataset
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract it into np arrays.
train_data = extract_mnist_data(local_urls[0], 60000, 28, 1)
train_labels = extract_mnist_labels(local_urls[1], 60000)
test_data = extract_mnist_data(local_urls[2], 10000, 28, 1)
test_labels = extract_mnist_labels(local_urls[3], 10000)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def extract_wiki(data_dir):
preprocessed_files = ['/wiki_train.npy',
'/wiki_train_labels.npy',
'/wiki_test.npy',
'/wiki_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
# if False:
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
# Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[2])
test_labels = np.load(data_dir + preprocessed_files[3])
else:
# Do everything from scratch
# Define lists of all files we should extract
# Load training images and labels
images = []
labels = []
test_images = []
test_labels = []
m = io.loadmat("/home/Cooper/wiki_crop/wiki.mat")
n = len(m["wiki"]["full_path"][0][0][0])
for i,file in enumerate(m["wiki"]["full_path"][0][0][0]):
# if i > 10:
# break
print(i,n)
picture = "/home/Cooper/wiki_crop/"+file[0]
proportion = 0.8
r = random()
im = resize(color.rgb2gray(misc.imread(picture)),(256,256))
label = m["wiki"]["gender"][0][0][0][i]
if not math.isnan(label):
if r < proportion:
images.append(im)
labels.append(int(label))
else:
test_images.append(im)
test_labels.append(int(label))
# p = np.random.permutation(len(images))
# images = np.put(np.zeros(len(images)),p,images)
# labels = np.put(np.zeros(len(images)),p,labels)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((len(labels),256,256,1))
# train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(len(labels))
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_images,dtype=np.float32).reshape((len(test_labels),256,256,1))
# test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_labels, dtype=np.int32).reshape(len(test_labels))
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
def extract_imdb(data_dir):
preprocessed_files = ['/imdb_train.npy',
'/imdb_train_labels.npy',
'/imdb_test.npy',
'/imdb_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
# Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[2])
test_labels = np.load(data_dir + preprocessed_files[3])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 22:58:41 2021
@author: cadecastro.com
"""
import numpy as np
import matplotlib.pyplot as plt
N=int(input('Número de puntos, menor que 1000, N='))
if N<=1000:
n=np.linspace(0,N,N+1)
F=np.zeros(N+1)
C=np.ones(N+1)
import os
import cv2
import xml.etree.ElementTree as ET
from math import cos, sin, radians
import numpy as np
from .finders import ThresholdFinder, AdaptiveThresholdFinder, OtsuThresholdFinder
from .targeters import CoreTargeter, RimTargeter, MomentsTargeter, SimpleBlobTargeter
from .generators import ChromiumGenerator, GeoStarGenerator
from PyQt5.QtGui import QTransform, QPolygonF
from PyQt5.QtCore import QPointF
class LACVController:
finders = [ThresholdFinder, AdaptiveThresholdFinder, OtsuThresholdFinder]
targeters = [CoreTargeter, RimTargeter, MomentsTargeter, SimpleBlobTargeter]
generators = [ChromiumGenerator, GeoStarGenerator]
finder = None
targeter = None
generator = None
global_finder_settings = {
'area': {
'enabled': True,
'min': 100,
'max': 1e6
},
'circularity': {
'enabled': True,
'min': 0,
'max': 2
},
'convexity': {
'enabled': False,
'min': 0,
'max': 2
},
'match': {
'enabled': True,
'min': 0,
'max': 2
}
}
def set_source(self, source):
if len(source) < 1:
print("No source chosen")
return
print('source=%s' % source)
# Find complement
source_dir = os.path.dirname(source)
image_file = ""
align_file = ""
print('source_dir=%s' % source_dir)
if source.lower().endswith(".align"):
print("given an align file")
align_file = os.path.basename(source)
file_root = align_file.split(".")[0]
for file in os.listdir(source_dir):
print(file)
if file == align_file:
continue
elif file.startswith(file_root) and file.split(".")[1] in ['bmp', 'jpg', 'png', 'tiff']:
image_file = file
break
else:
print("given an image file")
image_file = os.path.basename(source)
file_root = image_file.split(".")[0]
for file in os.listdir(source_dir):
print(file)
if file == image_file:
continue
elif file.startswith(file_root) and file.split(".")[1].lower() == "align":
align_file = file
break
print('align file=%s'%align_file)
print('image file=%s'%image_file)
if not align_file or not image_file:
print('Could not find the image/align pair... abort!')
self._source_image = None
return
print('loading image file:%s'%os.path.join(source_dir, image_file))
self._source_image = cv2.imread(os.path.join(source_dir, image_file))
#self._source_image = noisy('gauss', self._source_image).astype(np.uint8)
print('processing align file:%s'%os.path.join(source_dir, align_file))
align_xml = ET.parse(os.path.join(source_dir, align_file))
align_root = align_xml.getroot()
align = align_root.find('Alignment')
self.align_rotation = float(align.find('Rotation').text)
self.align_center = [float(x) for x in align.find('Center').text.split(',')]
self.align_size = [float(x) for x in align.find('Size').text.split(',')]
print('microns per pixel = %f'%(self.microns_per_pixel()))
xc, yc = self.align_center[0], self.align_center[1]
xmin, ymin = xc - self.align_size[0]/2.0, yc - self.align_size[1]/2.0
xmax, ymax = xc + self.align_size[0]/2.0, yc + self.align_size[1]/2.0
r = -self.align_rotation
def rot(x, y):
xp = xc + (x - xc)*cos(radians(r)) - (y-yc)*sin(radians(r))
yp = yc + (x - xc)*sin(radians(r)) - (y-yc)*cos(radians(r))
return xp, yp
src = np.float32([
[0, 0],
[0, self._source_image.shape[0]],
[self._source_image.shape[1], self._source_image.shape[0]]
])
dst = np.float32([
rot(xmin, ymin),
rot(xmin, ymax),
rot(xmax, ymax)
])
self.transform = cv2.getAffineTransform(src, dst)
def microns_per_pixel(self):
return np.array( [self.align_size[0]/self._source_image.shape[1], self.align_size[1]/self._source_image.shape[0] ]).mean()
def coords_in_image_to_cellspace(self, coords):
x, y = coords
points = np.array([ coords ])
ones = np.ones(shape=(len(points), 1))
points_ones = np.hstack([points, ones])
cs = self.transform.dot(points_ones.T).T
return cs[0]
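# Example (illustrative): with the 2x3 affine matrix T computed in
# set_source, a pixel coordinate (px, py) maps to cell space as
# T @ [px, py, 1]^T, which is what the hstack-with-ones above implements.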
def source_image(self):
return self._source_image
def noisy(noise_typ,image):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 20
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
noisy = image + gauss
return noisy
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt)) for i in image.shape]
out[tuple(coords)] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]
out[tuple(coords)] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ =="speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, assert_raises, assert_warns
)
import textwrap
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
def test_containing_list(self):
# printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
class TestComplexArray(object):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
""" Class for DLA Surveys
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
import imp, glob
import pdb
import warnings
try:
from urllib2 import urlopen # Python 2.7
except ImportError:
from urllib.request import urlopen
from pkg_resources import resource_filename
from astropy.table import Column, Table, vstack
from astropy import units as u
from astropy.stats import poisson_conf_interval as aspci
from astropy import constants as const
from astropy.cosmology import core as acc
from astropy.coordinates import SkyCoord, match_coordinates_sky
from linetools import utils as ltu
from pyigm.surveys.igmsurvey import IGMSurvey
from pyigm.surveys import utils as pyisu
from pyigm import utils as pyigmu
pyigm_path = imp.find_module('pyigm')[1]
lz_boot_file = resource_filename('pyigm', 'data/DLA/dla_lz_boot.fits.gz')
# Class for DLA Survey
class DLASurvey(IGMSurvey):
"""An DLA Survey class
Attributes:
"""
@classmethod
def load_HST16(cls, sample='stat'):
""" HST Survey by Neeleman+16
<NAME>. et al. 2016, ApJ, 818, 113
Parameters
----------
sample : str, optional
Returns
-------
dla_survey
"""
# Read DLAs
dat_file = resource_filename('pyigm', '/data/DLA/HST/HSTDLA.dat')
dlas = Table.read(dat_file, format='ascii')
# Read Quasars
#qsos = Table.read(pyigm_path + '/all_qso_table.txt', format='ascii')
# Read Sightlines
srvy_file = resource_filename('pyigm', '/data/DLA/HST/hstpath.dat')
survey = Table.read(srvy_file, format='ascii')
# Add info to DLA table
ras, decs, zems, scoords = [], [], [], []
for dla in dlas:
mt = np.where(survey['QSO'] == dla['NAME'])[0]
if len(mt) == 0:
pdb.set_trace()
raise ValueError("Uh oh")
else:
mt = mt[0]
# Generate RA/DEC
row = survey[mt]
scoords.append('{:02d}:{:02d}:{:f} {:s}{:02d}:{:02d}:{:f}'.format(
row['RAh'], row['RAm'], row['RAs'], row['DE-'], row['DEd'], row['DEm'],
row['DEs']))
#ras.append(coord.ra.value)
#decs.append(coord.dec.value)
# zem
zems.append(row['ZEM'])
#dlas['RA'] = ras
#dlas['DEC'] = decs
dlas['QSO_ZEM'] = zems
# Instantiate
coords = SkyCoord(scoords, unit=(u.hourangle, u.deg))
dla_survey = cls.from_sfits(dlas, coords)
dla_survey.ref = 'Neeleman+16'
# Fiddle a bit
survey.rename_column('STTMIN', 'Z_START')
survey.rename_column('STTMAX', 'Z_END')
stat = survey['Z_END'] > 0
stat_survey = survey[stat]
# Restrict to statistical sightlines
if sample == 'stat':
stat_survey = stat_survey[stat_survey['F_STT'] == 1]
ras, decs, zems = [], [], []
for row in stat_survey:
coord = ltu.radec_to_coord('J{:02d}{:02d}{:f}{:s}{:02d}{:02d}{:f}'.format(
row['RAh'], row['RAm'], row['RAs'], row['DE-'], row['DEd'], row['DEm'],
row['DEs']))
ras.append(coord.ra.value)
decs.append(coord.dec.value)
stat_survey['RA'] = ras
stat_survey['DEC'] = decs
stat_survey['FLG_BAL'] = 0
# Sightlines
dla_survey.sightlines = stat_survey
# Stat?
if sample in ['all', 'all_sys']:
return dla_survey
mask = dla_stat(dla_survey, stat_survey)
if sample == 'stat':
dla_survey.mask = mask & (dlas['STAT_FLG'] == 1)
else:
dla_survey.mask = ~mask
# Return
return dla_survey
@classmethod
def load_H100(cls, grab_spectra=False, build_abs_sys=True, isys_path=None):
""" Sample of unbiased HIRES DLAs compiled and analyzed by Neeleman+13
<NAME> al. 2013, ApJ, 769, 54
Parameters
----------
build_abs_sys : bool, optional
Build AbsSystem objects (~10s)
Required for a fair bit of other things, e.g. kin
isys_path : str, optional
Read system files from this path
grab_spectra : bool, optional
Grab 1D spectra? (141Mb)
deprecated.. Use igmspec
Return
------
dla_survey : DLASurvey
"""
# Pull from Internet (as necessary)
summ_fil = resource_filename('pyigm', "/data/DLA/H100/H100_DLA.fits")
print('H100: Loading summary file {:s}'.format(summ_fil))
# Ions
ions_fil = resource_filename('pyigm', "/data/DLA/H100/H100_DLA_ions.json")
print('H100: Loading ions file {:s}'.format(ions_fil))
# Transitions
trans_fil = resource_filename('pyigm', "/data/DLA/H100/H100_DLA_clms.tar.gz")
# System files
sys_files = resource_filename('pyigm', "/data/DLA/H100/H100_DLA_sys.tar.gz")
print('H100: Loading systems. This takes ~10s')
dla_survey = pyisu.load_sys_files(sys_files, 'DLA', build_abs_sys=build_abs_sys)
# Reset flag_NHI (which has been wrong)
for key in dla_survey._dict.keys():
dla_survey._dict[key]['flag_NHI'] = 1
# Fill ion Tables
if build_abs_sys:
print("Filling the _ionN tables...")
dla_survey.fill_ions(use_components=True)
dla_survey.ref = 'Neeleman+13'
if not build_abs_sys:
print("Not loading up all the other data. Use build_abs_sys=True for that!")
return dla_survey
# Metallicities
tbl2_file = resource_filename('pyigm', "/data/DLA/H100/H100_table2.dat")
tbl2 = Table.read(tbl2_file, format='cds')
# Parse for matching
names = dla_survey._data['Name']
qsonames = []
zabs = []
for name in names:
prs = name.split('_')
qsonames.append(prs[0])
try:
zabs.append(float(prs[-1][1:]))
except ValueError:
pdb.set_trace()
qsonames = np.array(qsonames)
zabs = np.array(zabs)
# Match
for ii, iqso, izabs in zip(range(len(tbl2)), tbl2['QSO'], tbl2['zabs']):
mt = np.where((qsonames == iqso) & (np.abs(izabs-zabs) < 1e-3))[0]
if len(mt) == 0:
pdb.set_trace()
elif len(mt) != 1:
pdb.set_trace()
# Metallicity
dla_survey._abs_sys[mt[0]].ZH = tbl2['[M/H]'][ii]
dla_survey._abs_sys[mt[0]].sig_ZH = tbl2['e_[M/H]'][ii]
if tbl2['M'][ii] in ['S','Si','O']:
dla_survey._abs_sys[mt[0]].flag_ZH = 1 # Alpha
elif tbl2['M'][ii] in ['Zn']:
dla_survey._abs_sys[mt[0]].flag_ZH = 2 # Zn
elif tbl2['M'][ii] in ['Fe']:
dla_survey._abs_sys[mt[0]].flag_ZH = 4 # Fe
else:
raise ValueError("Bad metal")
dla_survey._abs_sys[mt[0]].elm_Z = tbl2['M'][ii]
# Kin
dla_survey._abs_sys[mt[0]].kin['dv'] = tbl2['dv'][ii]
dla_survey._abs_sys[mt[0]].kin['trans'] = tbl2['trans'][ii]
dla_survey._abs_sys[mt[0]].selection = tbl2['Select'][ii]
spath = pyigm_path+"/data/DLA/H100/Spectra/"
for dla in dla_survey._abs_sys:
dla.spec_path = spath
# Spectra?
if grab_spectra:
warnings.warn("All of these spectra are in igmspec at https://github.com/specdb/specdb")
print("Grab them there!")
print("All done!!")
return dla_survey
@classmethod
def load_SDSS_DR5(cls, sample='stat'):
""" Load the DLA from the SDSS-DR5 survey
(Prochaska & Wolfe 2009, ApJ, 696, 1543)
Parameters
----------
sample : str, optional
DLA sample
stat : Statistical sample
all : All DLA (NHI >= 20.3)
all_sys : All systems identified -- Returns an LLSSurvey instead
nonstat : Non-statistical sample
Returns
-------
dla_survey : DLASurvey
"""
from .llssurvey import LLSSurvey
import warnings
# LLS File
dla_fil = resource_filename('pyigm','/data/DLA/SDSS_DR5/dr5_alldla.fits.gz')
print('SDSS-DR5: Loading DLA file {:s}'.format(dla_fil))
dlas = Table.read(dla_fil)
# Rename some columns?
dlas.rename_column('QSO_RA', 'RA')
dlas.rename_column('QSO_DEC', 'DEC')
# Generate coords
scoords = [dlas['RA'][ii]+' '+dlas['DEC'][ii] for ii in range(len(dlas))]
coords = SkyCoord(scoords, unit=(u.hourangle, u.deg))
# Cut on NHI
if sample != 'all_sys':
gd_dla = dlas['NHI'] >= 20.3
dla_survey = cls.from_sfits(dlas[gd_dla], coords=coords[gd_dla])
else:
warnings.warn("Loading an LLSSurvey not a DLASurvey")
dla_survey = LLSSurvey.from_sfits(dlas, coords=coords)
# Read
dla_survey.ref = 'SDSS-DR5 (PW09)'
# g(z) file
qsos_fil = resource_filename('pyigm','/data/DLA/SDSS_DR5/dr5_dlagz_s2n4.fits')
print('SDSS-DR5: Loading QSOs file {:s}'.format(qsos_fil))
qsos = Table.read(qsos_fil)
qsos.rename_column('Z1', 'Z_START')
qsos.rename_column('Z2', 'Z_END')
qsos.remove_column('DX')
# Reformat
new_cols = []
for key in qsos.keys():
if key in ['GZZ', 'GZV']:
continue
# New one
new_cols.append(Column(qsos[key].flatten(), name=key))
newqsos = Table(new_cols)
newqsos['RA'].unit = u.deg
newqsos['DEC'].unit = u.deg
dla_survey.sightlines = newqsos
# All?
if sample in ['all', 'all_sys']:
return dla_survey
# Stat
# Generate mask
print('SDSS-DR5: Performing stats')
mask = dla_stat(dla_survey, newqsos)
if sample == 'stat':
dla_survey.mask = mask
else:
dla_survey.mask = ~mask
# Return
print('SDSS-DR5: Loaded')
return dla_survey
@classmethod
def load_lit(cls, dla_fil, qsos_fil, ref, sample='stat', fmt=None,
Pdla_fil=None, **kwargs):
""" Load the DLA from a literature sample using the files
provided by Ruben (see Sanchez-Ramirez et al. 2016, MNRAS, 456, 4488)
Parameters
----------
dla_fil : str or Table
Name of file containting a Table (or the Table itself) on DLAs
qsos_fil : str or Table
Name of file containting a Table (or the Table itself) on QSO sightlines
fmt : str, optional
Format for Table.read()
sample : str, optional
DLA sample
stat : Statistical sample
all : All LLS
nonstat : Non-statistical sample
Pdla_fil : str, optional
Additonal table of Proximate DLAs
**kwargs : optional
Passed to dla_stat()
Returns
-------
dla_survey : DLASurvey
"""
# DLA files
stat_dlas = Table.read(dla_fil, format=fmt)
if Pdla_fil is not None:
Pdlas = Table.read(Pdla_fil)
dlas = vstack([stat_dlas,Pdlas])
else:
dlas = stat_dlas
# Rename some columns?
try:
dlas.rename_column('logN', 'NHI')
except KeyError:
pass
# Cut on NHI
gd_dla = dlas['NHI'] >= 20.3
# Read
dla_survey = cls.from_sfits(dlas[gd_dla])
dla_survey.ref = ref
# g(z) file
print('Loading QSOs file {:s}'.format(qsos_fil))
qsos = Table.read(qsos_fil, format=fmt)
try:
qsos.rename_column('zmin', 'Z_START')
except KeyError:
pass
else:
qsos.rename_column('zmax', 'Z_END')
qsos.rename_column('Dec', 'DEC')
qsos.rename_column('zem', 'ZEM')
dla_survey.sightlines = qsos
# Add zem?
if 'zem' not in dla_survey._data.keys():
scoord = SkyCoord(ra=qsos['RA'], dec=qsos['DEC'], unit='deg')
dcoord = SkyCoord(ra=dla_survey._data['RA'], dec=dla_survey._data['DEC'], unit='deg')
idx, d2d, _ = match_coordinates_sky(dcoord, scoord, nthneighbor=1)
assert np.min(d2d) < 1*u.arcsec
#
dla_survey._data['zem'] = qsos['ZEM'][idx]
# BAL?
if 'FLG_BAL' not in qsos.keys():
qsos['FLG_BAL'] = 0
# All?
if sample == 'all':
return dla_survey
# Stat
# Generate mask (True = good)
mask = dla_stat(dla_survey, qsos, **kwargs)
if sample == 'stat':
dla_survey.mask = mask
else:
dla_survey.mask = ~mask
# Return
print('Loaded survey')
return dla_survey
@classmethod
def load_P03(cls, sample='stat'):
""" Load the DLA from the Peroux+03 survey
(Peroux et al. 2003, MNRAS, 346, 1103)
IUE dataset has been removed (see Sanchez-Ramirez)
Errors and duplicates cleaned by Sanchez-Ramirez
Adopts a 5000km/s cutoff
Parameters
----------
sample : str, optional
DLA sample
stat : Statistical sample
all : All LLS
nonstat : Non-statistical sample
Returns
-------
dla_survey : DLASurvey
"""
# DLA files
dla_fil = pyigm_path+'/data/DLA/P03/P03_abs.fit'
ref = 'P03'
qsos_fil = pyigm_path+'/data/DLA/P03/P03_zpath.fit'
#
dla_survey = cls.load_lit(dla_fil, qsos_fil, ref, sample=sample, skip_zem=True)
return dla_survey
@classmethod
def load_G09(cls, sample='stat'):
""" Load the DLA from the Guimaraes+09 survey
(Guimaraes et al. 2009, A&A, 508, 133)
Adopts a 5000km/s cutoff
Parameters
----------
sample : str, optional
DLA sample
stat : Statistical sample
all : All LLS
nonstat : Non-statistical sample
Returns
-------
dla_survey : DLASurvey
"""
# DLA files
dla_fil = pyigm_path+'/data/DLA/G09/G09_abs.fit'
Pdla_fil = pyigm_path+'/data/DLA/G09/G09_pabs.fit'
ref = 'G09'
qsos_fil = pyigm_path+'/data/DLA/G09/G09_zpath.fit'
#
dla_survey = cls.load_lit(dla_fil, qsos_fil, ref,
Pdla_fil=Pdla_fil, sample=sample, skip_zem=True)
return dla_survey
@classmethod
def load_GGG(cls, sample='stat'):
""" Load the DLA from GGG
(Crighton et al. 2015, MNRAS, 452, 217
http://adsabs.harvard.edu/abs/2015MNRAS.452..217C)
Parameters
----------
sample : str, optional
Returns
-------
dla_survey : DLASurvey
"""
# DLA files
dla_fil = pyigm_path+'/data/DLA/GGG/GGG_DLA.dat'
ref = 'GGG'
qsos_fil = pyigm_path+'/data/DLA/GGG/GGG_QSO.dat'
#
dla_survey = cls.load_lit(dla_fil, qsos_fil, ref, sample=sample, fmt='ascii')
return dla_survey
@classmethod
def load_XQ100(cls, sample='stat'):
""" Load the DLA from XQ-100
(Sanchez-Ramirez et al. 2016, MNRAS, 456, 4488)
http://adsabs.harvard.edu/abs/2016MNRAS.456.4488S
Parameters
----------
sample : str, optional
DLA sample
stat : Statistical sample
all : All DLA
nonstat : Non-statistical sample
Returns
-------
dla_survey : DLASurvey
"""
# DLA files
dla_fil = pyigm_path+'/data/DLA/XQ-100/XQ100_abs.fit'
Pdla_fil = pyigm_path+'/data/DLA/XQ-100/XQ100_pabs.fit'
ref = 'XQ-100'
qsos_fil = pyigm_path+'/data/DLA/XQ-100/XQ100_zpath.fit'
#
dla_survey = cls.load_lit(dla_fil, qsos_fil, ref,Pdla_fil=Pdla_fil,
sample=sample, skip_zem=True)
return dla_survey
@classmethod
def neeleman13_tree(cls):
""" Read Neeleman+13 data from the DLA tree (deprecated)
Returns
-------
dlasurvey : IGMSurvey
"""
# Default sample of DLA: Neeleman
if os.getenv('DLA') is None:
print('Need to grab the DLA tree from JXP')
return None
dlasurvey = cls.from_flist('Lists/Neeleman13.lst', tree=os.environ.get('DLA'))
dlasurvey.ref = 'Neeleman+13'
# Return
return dlasurvey
def __init__(self, **kwargs):
IGMSurvey.__init__(self, 'DLA', **kwargs)
# define the cosmology (for H0)
try:
_ = self.cosmo
except ValueError:
self.cosmo = acc.FlatLambdaCDM(70., 0.3)
# Load fits
self.load_fitted()
def binned_rhoHI(self, zbins, nhbins=(20.3, 23.), nboot=1000):
""" Calculate the mass density in HI
Parameters
----------
zbins : list
    Redshift bin edges
nhbins : tuple, optional
    log10 NHI bin edges
nboot : int, optional
    Number of bootstrap realizations (currently unused; the bootstrap
    call below is disabled)
Returns
-------
rhoHI : ndarray
Evaluation of HI mass density, with units
rhoHI_lo : ndarray
Error estimate (low side)
rhoHI_hi : ndarray
Error estimate (high side)
"""
# generate the fN components
fncomp = self.__generate_fncomp__(nhbins, zbins)
# get the absorption path length
dXtot = self.__find_dXtot__(zbins)
# get the total column density per zbin
NHtot = self.__find_NHtot__(zbins, NH_mnx=(np.min(nhbins), np.max(nhbins)))
# bootstrap NH_average uncertainty
#NHunc = self.__bootstrap_rhohi__(fncomp, nhbins, zbins, nboot=nboot)
        NHunc = 1e20  # placeholder NHI uncertainty; the bootstrap estimate above is disabled
# total number of absorbers + poisson uncertainty
Ntot = fncomp.sum(axis=0)
Nunc = aspci(Ntot, interval='frequentist-confidence')
frac_unc = np.sqrt(np.power(abs(Nunc - Ntot) / Ntot, 2) +
np.power(np.array([NHunc / (NHtot / Ntot), ] * 2), 2))
# rho_HI
rhoHI = NHtot / dXtot
rhoHI_lo = rhoHI * frac_unc[0, :]
rhoHI_hi = rhoHI * frac_unc[1, :]
# Constants
rhoHI = rhoHI * (const.m_p.cgs * self.cosmo.H0 /
const.c.cgs / (u.cm ** 2)).to(u.Msun / u.Mpc ** 3)
rhoHI_lo = rhoHI_lo * (const.m_p.cgs * self.cosmo.H0 /
const.c.cgs / (u.cm ** 2)).to(u.Msun / u.Mpc ** 3)
rhoHI_hi = rhoHI_hi * (const.m_p.cgs * self.cosmo.H0 /
const.c.cgs / (u.cm ** 2)).to(u.Msun / u.Mpc ** 3)
return rhoHI, rhoHI_lo, rhoHI_hi
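# Example usage (a sketch; the loader and bin edges below are illustrative
# assumptions, not values taken from this module):
#   dlas = DLASurvey.load_XQ100(sample='stat')
#   rhoHI, rhoHI_lo, rhoHI_hi = dlas.binned_rhoHI(zbins=[2., 3., 4.],
#                                                 nhbins=(20.3, 22.))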
def fitted_lz(self, z, form='atan', boot_error=False):
""" Return l(z) as evaluated from a fit
'atan' -- arctan parameterization of Prochaska & Neeleman 2017
Parameters
----------
z : float or ndarray
form : str, optional
boot_error : bool, optional
    Default is False
Returns
-------
lz : float or ndarray (depends on input z)
sig_lz : ndarray, optional
    (if boot_error=True)
"""
if isinstance(z, float):
flg_float = True
z = np.array([z])
else:
flg_float = False
if form == 'atan':
param = self.dla_fits['lz'][form]
lz = param['A'] + param['B'] * np.arctan(z-param['C'])
# Error?
if boot_error:
lz_boot = load_boot_lz()
sig_lz = np.zeros((len(z),2))
for kk,iz in enumerate(z):
lzs = lz_boot['A'] + lz_boot['B'] * np.arctan(iz - lz_boot['C'])
perc = np.percentile(lzs, [16., 84.])
# Save
sig_lz[kk,:] = perc-lz[kk]
else:
raise ValueError("Bad form input to fitted_lz: {:s}".format(form))
# Finish
if flg_float:
rlz = lz[0]
else:
rlz = lz
# Return
if boot_error:
return rlz, sig_lz
else:
return rlz
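# Example usage (a sketch; assumes the pyigm fit and data files are installed):
#   dlas = DLASurvey.load_XQ100(sample='stat')
#   lz = dlas.fitted_lz(2.5)
#   lz, sig_lz = dlas.fitted_lz(np.array([2., 2.5, 3.]), boot_error=True)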
def fitted_fN(self, lgNHI, form='dpow'):
""" Evaluate f(N) for a double power-law
Without normalization
Parameters
----------
lgNHI : float or ndarray
log10 NHI
form : str, optional
Returns
-------
fNHI : float or ndarray
f(NHI) without normalization
"""
if isinstance(lgNHI, float):
flg_float = True
lgNHI = np.array([lgNHI])
else:
flg_float = False
# Model -- consider using pyigm.fN.FNmodel
if form == 'dpow':
param = self.dla_fits['fN'][form]
# Evaluate
high = lgNHI > param['Nd']
fNHI = np.zeros_like(lgNHI)
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2019 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Basic tests for CurvesROIWidget"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "07/03/2018"
from silx.gui import qt
from silx.gui.plot.stats import stats
from silx.gui.plot import StatsWidget
from silx.gui.plot.stats import statshandler
from silx.gui.utils.testutils import TestCaseQt, SignalListener
from silx.gui.plot import Plot1D, Plot2D
from silx.utils.testutils import ParametricTestCase
import unittest
import logging
import numpy
_logger = logging.getLogger(__name__)
class TestStats(TestCaseQt):
"""
Test :class:`BaseClass` class and inheriting classes
"""
def setUp(self):
TestCaseQt.setUp(self)
self.createCurveContext()
self.createImageContext()
self.createScatterContext()
def tearDown(self):
self.plot1d.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot1d.close()
self.plot2d.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot2d.close()
self.scatterPlot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.scatterPlot.close()
def createCurveContext(self):
self.plot1d = Plot1D()
x = range(20)
y = range(20)
self.plot1d.addCurve(x, y, legend='curve0')
self.curveContext = stats._CurveContext(
item=self.plot1d.getCurve('curve0'),
plot=self.plot1d,
onlimits=False)
def createScatterContext(self):
self.scatterPlot = Plot2D()
lgd = 'scatter plot'
self.xScatterData = numpy.array([0, 1, 2, 20, 50, 60, 36])
self.yScatterData = numpy.array([2, 3, 4, 26, 69, 6, 18])
self.valuesScatterData = numpy.array([5, 6, 7, 10, 90, 20, 5])
self.scatterPlot.addScatter(self.xScatterData, self.yScatterData,
self.valuesScatterData, legend=lgd)
self.scatterContext = stats._ScatterContext(
item=self.scatterPlot.getScatter(lgd),
plot=self.scatterPlot,
onlimits=False
)
def createImageContext(self):
self.plot2d = Plot2D()
self._imgLgd = 'test image'
self.imageData = numpy.arange(32*128).reshape(32, 128)
self.plot2d.addImage(data=self.imageData,
legend=self._imgLgd, replace=False)
self.imageContext = stats._ImageContext(
item=self.plot2d.getImage(self._imgLgd),
plot=self.plot2d,
onlimits=False
)
def getBasicStats(self):
return {
'min': stats.StatMin(),
'minCoords': stats.StatCoordMin(),
'max': stats.StatMax(),
'maxCoords': stats.StatCoordMax(),
'std': stats.Stat(name='std', fct=numpy.std),
'mean': stats.Stat(name='mean', fct=numpy.mean),
'com': stats.StatCOM()
}
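# A custom statistic only needs a name and a callable reduction. For
# instance (numpy.ptp is just one possible choice of reduction):
#   ptp = stats.Stat(name='ptp', fct=numpy.ptp)
#   ptp.calculate(self.curveContext) # peak-to-peak of the curve's y data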
def testBasicStatsCurve(self):
"""Test result for simple stats on a curve"""
_stats = self.getBasicStats()
xData = yData = numpy.array(range(20))
self.assertEqual(_stats['min'].calculate(self.curveContext), 0)
self.assertEqual(_stats['max'].calculate(self.curveContext), 19)
self.assertEqual(_stats['minCoords'].calculate(self.curveContext), (0,))
self.assertEqual(_stats['maxCoords'].calculate(self.curveContext), (19,))
self.assertEqual(_stats['std'].calculate(self.curveContext), numpy.std(yData))
self.assertEqual(_stats['mean'].calculate(self.curveContext), numpy.mean(yData))
com = numpy.sum(xData * yData) / numpy.sum(yData)
self.assertEqual(_stats['com'].calculate(self.curveContext), com)
def testBasicStatsImage(self):
"""Test result for simple stats on an image"""
_stats = self.getBasicStats()
self.assertEqual(_stats['min'].calculate(self.imageContext), 0)
self.assertEqual(_stats['max'].calculate(self.imageContext), 128 * 32 - 1)
self.assertEqual(_stats['minCoords'].calculate(self.imageContext), (0, 0))
self.assertEqual(_stats['maxCoords'].calculate(self.imageContext), (127, 31))
self.assertEqual(_stats['std'].calculate(self.imageContext), numpy.std(self.imageData))
self.assertEqual(_stats['mean'].calculate(self.imageContext), numpy.mean(self.imageData))
yData = numpy.sum(self.imageData.astype(numpy.float64), axis=1)
xData = numpy.sum(self.imageData.astype(numpy.float64), axis=0)
dataXRange = range(self.imageData.shape[1])
dataYRange = range(self.imageData.shape[0])
ycom = numpy.sum(yData*dataYRange) / numpy.sum(yData)
xcom = numpy.sum(xData*dataXRange) / numpy.sum(xData)
self.assertEqual(_stats['com'].calculate(self.imageContext), (xcom, ycom))
def testStatsImageAdv(self):
"""Test that scale and origin are taking into account for images"""
image2Data = numpy.arange(32 * 128).reshape(32, 128)
self.plot2d.addImage(data=image2Data, legend=self._imgLgd,
replace=True, origin=(100, 10), scale=(2, 0.5))
image2Context = stats._ImageContext(
item=self.plot2d.getImage(self._imgLgd),
plot=self.plot2d,
onlimits=False
)
_stats = self.getBasicStats()
self.assertEqual(_stats['min'].calculate(image2Context), 0)
self.assertEqual(
_stats['max'].calculate(image2Context), 128 * 32 - 1)
self.assertEqual(
_stats['minCoords'].calculate(image2Context), (100, 10))
self.assertEqual(
_stats['maxCoords'].calculate(image2Context), (127*2. + 100,
31 * 0.5 + 10))
self.assertEqual(_stats['std'].calculate(image2Context),
numpy.std(self.imageData))
self.assertEqual(_stats['mean'].calculate(image2Context),
numpy.mean(self.imageData))
yData = numpy.sum(self.imageData, axis=1)
xData = numpy.sum(self.imageData, axis=0)
dataXRange = numpy.arange(self.imageData.shape[1], dtype=numpy.float64)
dataYRange = numpy.arange(self.imageData.shape[0], dtype=numpy.float64)
ycom = numpy.sum(yData * dataYRange) / numpy.sum(yData)
ycom = (ycom * 0.5) + 10
xcom = numpy.sum(xData * dataXRange) / numpy.sum(xData)
xcom = (xcom * 2.) + 100
self.assertTrue(numpy.allclose(
_stats['com'].calculate(image2Context), (xcom, ycom)))
def testBasicStatsScatter(self):
"""Test result for simple stats on a scatter"""
_stats = self.getBasicStats()
self.assertEqual(_stats['min'].calculate(self.scatterContext), 5)
self.assertEqual(_stats['max'].calculate(self.scatterContext), 90)
self.assertEqual(_stats['minCoords'].calculate(self.scatterContext), (0, 2))
self.assertEqual(_stats['maxCoords'].calculate(self.scatterContext), (50, 69))
self.assertEqual(_stats['std'].calculate(self.scatterContext), numpy.std(self.valuesScatterData))
self.assertEqual(_stats['mean'].calculate(self.scatterContext), numpy.mean(self.valuesScatterData))
data = self.valuesScatterData.astype(numpy.float64)
comx = numpy.sum(self.xScatterData * data) / numpy.sum(data)
comy = numpy.sum(self.yScatterData * data) / numpy.sum(data)
self.assertEqual(_stats['com'].calculate(self.scatterContext),
(comx, comy))
def testKindNotManagedByStat(self):
"""Make sure an exception is raised if we try to execute calculate
of the base class"""
b = stats.StatBase(name='toto', compatibleKinds='curve')
with self.assertRaises(NotImplementedError):
b.calculate(self.imageContext)
def testKindNotManagedByContext(self):
"""
Make sure an error is raised if we try to compute a statistic with
a context it does not manage
"""
myStat = stats.Stat(name='toto', fct=numpy.std, kinds=('curve',))
myStat.calculate(self.curveContext)
with self.assertRaises(ValueError):
myStat.calculate(self.scatterContext)
with self.assertRaises(ValueError):
myStat.calculate(self.imageContext)
def testOnLimits(self):
stat = stats.StatMin()
self.plot1d.getXAxis().setLimitsConstraints(minPos=2, maxPos=5)
curveContextOnLimits = stats._CurveContext(
item=self.plot1d.getCurve('curve0'),
plot=self.plot1d,
onlimits=True)
self.assertEqual(stat.calculate(curveContextOnLimits), 2)
self.plot2d.getXAxis().setLimitsConstraints(minPos=32)
imageContextOnLimits = stats._ImageContext(
item=self.plot2d.getImage('test image'),
plot=self.plot2d,
onlimits=True)
self.assertEqual(stat.calculate(imageContextOnLimits), 32)
self.scatterPlot.getXAxis().setLimitsConstraints(minPos=40)
scatterContextOnLimits = stats._ScatterContext(
item=self.scatterPlot.getScatter('scatter plot'),
plot=self.scatterPlot,
onlimits=True)
self.assertEqual(stat.calculate(scatterContextOnLimits), 20)
class TestStatsFormatter(TestCaseQt):
"""Simple test to check usage of the :class:`StatsFormatter`"""
def setUp(self):
self.plot1d = Plot1D()
x = range(20)
y = range(20)
self.plot1d.addCurve(x, y, legend='curve0')
self.curveContext = stats._CurveContext(
item=self.plot1d.getCurve('curve0'),
plot=self.plot1d,
onlimits=False)
self.stat = stats.StatMin()
def tearDown(self):
self.plot1d.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot1d.close()
def testEmptyFormatter(self):
"""Make sure a formatter with no formatter definition will return a
simple cast to str"""
emptyFormatter = statshandler.StatFormatter()
self.assertEqual(
emptyFormatter.format(self.stat.calculate(self.curveContext)), '0.000')
def testSettedFormatter(self):
"""Make sure a formatter with no formatter definition will return a
simple cast to str"""
formatter= statshandler.StatFormatter(formatter='{0:.3f}')
self.assertEqual(
formatter.format(self.stat.calculate(self.curveContext)), '0.000')
class TestStatsHandler(TestCaseQt):
"""Make sure the StatHandler is correctly making the link between
:class:`StatBase` and :class:`StatFormatter` and checking the API is valid
"""
def setUp(self):
TestCaseQt.setUp(self)
self.plot1d = Plot1D()
x = range(20)
y = range(20)
self.plot1d.addCurve(x, y, legend='curve0')
self.curveItem = self.plot1d.getCurve('curve0')
self.stat = stats.StatMin()
def tearDown(self):
self.plot1d.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot1d.close()
self.plot1d = None
TestCaseQt.tearDown(self)
def testConstructor(self):
"""Make sure the constructor can deal will all possible arguments:
* tuple of :class:`StatBase` derivated classes
* tuple of tuples (:class:`StatBase`, :class:`StatFormatter`)
* tuple of tuples (str, pointer to function, kind)
"""
handler0 = statshandler.StatsHandler(
(stats.StatMin(), stats.StatMax())
)
res = handler0.calculate(item=self.curveItem, plot=self.plot1d,
onlimits=False)
self.assertTrue('min' in res)
self.assertEqual(res['min'], '0')
self.assertTrue('max' in res)
self.assertEqual(res['max'], '19')
handler1 = statshandler.StatsHandler(
(
(stats.StatMin(), statshandler.StatFormatter(formatter=None)),
(stats.StatMax(), statshandler.StatFormatter())
)
)
res = handler1.calculate(item=self.curveItem, plot=self.plot1d,
onlimits=False)
self.assertTrue('min' in res)
self.assertEqual(res['min'], '0')
self.assertTrue('max' in res)
self.assertEqual(res['max'], '19.000')
handler2 = statshandler.StatsHandler(
(
(stats.StatMin(), None),
(stats.StatMax(), statshandler.StatFormatter())
))
res = handler2.calculate(item=self.curveItem, plot=self.plot1d,
onlimits=False)
self.assertTrue('min' in res)
self.assertEqual(res['min'], '0')
self.assertTrue('max' in res)
self.assertEqual(res['max'], '19.000')
handler3 = statshandler.StatsHandler((
(('amin', numpy.argmin), statshandler.StatFormatter()),
('amax', numpy.argmax)
))
res = handler3.calculate(item=self.curveItem, plot=self.plot1d,
onlimits=False)
self.assertTrue('amin' in res)
self.assertEqual(res['amin'], '0.000')
self.assertTrue('amax' in res)
self.assertEqual(res['amax'], '19')
with self.assertRaises(ValueError):
statshandler.StatsHandler(('name'))
class TestStatsWidgetWithCurves(TestCaseQt, ParametricTestCase):
"""Basic test for StatsWidget with curves"""
def setUp(self):
TestCaseQt.setUp(self)
self.plot = Plot1D()
self.plot.show()
x = range(20)
y = range(20)
self.plot.addCurve(x, y, legend='curve0')
y = range(12, 32)
self.plot.addCurve(x, y, legend='curve1')
y = range(-2, 18)
self.plot.addCurve(x, y, legend='curve2')
self.widget = StatsWidget.StatsWidget(plot=self.plot)
self.statsTable = self.widget._statsTable
mystats = statshandler.StatsHandler((
stats.StatMin(),
(stats.StatCoordMin(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
stats.StatMax(),
(stats.StatCoordMax(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
stats.StatDelta(),
('std', numpy.std),
('mean', numpy.mean),
stats.StatCOM()
))
self.statsTable.setStats(mystats)
def tearDown(self):
self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot.close()
self.statsTable = None
self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
self.widget.close()
self.widget = None
self.plot = None
TestCaseQt.tearDown(self)
def testDisplayActiveItemsSyncOptions(self):
"""
Test that the various sync options stay synchronized between the
different objects"""
widget = StatsWidget.StatsWidget(plot=self.plot)
table = StatsWidget.StatsTable(plot=self.plot)
def check_display_only_active_item(only_active):
# check internal value
self.assertTrue(widget._statsTable._displayOnlyActItem is only_active)
# self.assertTrue(table._displayOnlyActItem is only_active)
# check gui display
self.assertTrue(widget._options.isActiveItemMode() is only_active)
for displayOnlyActiveItems in (True, False):
with self.subTest(displayOnlyActiveItems=displayOnlyActiveItems):
widget.setDisplayOnlyActiveItem(displayOnlyActiveItems)
# table.setDisplayOnlyActiveItem(displayOnlyActiveItems)
check_display_only_active_item(displayOnlyActiveItems)
check_display_only_active_item(only_active=False)
widget.setAttribute(qt.Qt.WA_DeleteOnClose)
table.setAttribute(qt.Qt.WA_DeleteOnClose)
widget.close()
table.close()
def testInit(self):
"""Make sure all the curves are registred on initialization"""
self.assertEqual(self.statsTable.rowCount(), 3)
def testRemoveCurve(self):
"""Make sure the Curves stats take into account the curve removal from
plot"""
self.plot.removeCurve('curve2')
self.assertEqual(self.statsTable.rowCount(), 2)
for iRow in range(2):
self.assertTrue(self.statsTable.item(iRow, 0).text() in ('curve0', 'curve1'))
self.plot.removeCurve('curve0')
self.assertEqual(self.statsTable.rowCount(), 1)
self.plot.removeCurve('curve1')
self.assertEqual(self.statsTable.rowCount(), 0)
def testAddCurve(self):
"""Make sure the Curves stats take into account the add curve action"""
self.plot.addCurve(legend='curve3', x=range(10), y=range(10))
self.assertEqual(self.statsTable.rowCount(), 4)
def testUpdateCurveFromAddCurve(self):
"""Make sure the stats of the cuve will be removed after updating a
curve"""
self.plot.addCurve(legend='curve0', x=range(10), y=range(10))
self.qapp.processEvents()
self.assertEqual(self.statsTable.rowCount(), 3)
curve = self.plot._getItem(kind='curve', legend='curve0')
tableItems = self.statsTable._itemToTableItems(curve)
self.assertEqual(tableItems['max'].text(), '9')
def testUpdateCurveFromCurveObj(self):
self.plot.getCurve('curve0').setData(x=range(4), y=range(4))
self.qapp.processEvents()
self.assertEqual(self.statsTable.rowCount(), 3)
curve = self.plot._getItem(kind='curve', legend='curve0')
tableItems = self.statsTable._itemToTableItems(curve)
self.assertEqual(tableItems['max'].text(), '3')
def testSetAnotherPlot(self):
plot2 = Plot1D()
plot2.addCurve(x=range(26), y=range(26), legend='new curve')
self.statsTable.setPlot(plot2)
self.assertEqual(self.statsTable.rowCount(), 1)
self.qapp.processEvents()
plot2.setAttribute(qt.Qt.WA_DeleteOnClose)
plot2.close()
plot2 = None
def testUpdateMode(self):
"""Make sure the update modes are well take into account"""
self.plot.setActiveCurve('curve0')
for display_only_active in (True, False):
with self.subTest(display_only_active=display_only_active):
self.widget.setDisplayOnlyActiveItem(display_only_active)
self.plot.getCurve('curve0').setData(x=range(4), y=range(4))
self.widget.setUpdateMode(StatsWidget.UpdateMode.AUTO)
update_stats_action = self.widget._options.getUpdateStatsAction()
# test from api
self.assertTrue(self.widget.getUpdateMode() is StatsWidget.UpdateMode.AUTO)
self.widget.show()
# check stats change in auto mode
self.plot.getCurve('curve0').setData(x=range(4), y=range(-1, 3))
self.qapp.processEvents()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == -1.)
self.plot.getCurve('curve0').setData(x=range(4), y=range(1, 5))
self.qapp.processEvents()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == 1.)
# check stats change in manual mode only if requested
self.widget.setUpdateMode(StatsWidget.UpdateMode.MANUAL)
self.assertTrue(self.widget.getUpdateMode() is StatsWidget.UpdateMode.MANUAL)
self.plot.getCurve('curve0').setData(x=range(4), y=range(2, 6))
self.qapp.processEvents()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == 1.)
update_stats_action.trigger()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == 2.)
def testItemHidden(self):
"""Test if an item is hide, then the associated stats item is also
hide"""
curve0 = self.plot.getCurve('curve0')
curve1 = self.plot.getCurve('curve1')
curve2 = self.plot.getCurve('curve2')
self.plot.show()
self.widget.show()
self.qWaitForWindowExposed(self.widget)
self.assertFalse(self.statsTable.isRowHidden(0))
self.assertFalse(self.statsTable.isRowHidden(1))
self.assertFalse(self.statsTable.isRowHidden(2))
curve0.setVisible(False)
self.qapp.processEvents()
self.assertTrue(self.statsTable.isRowHidden(0))
curve0.setVisible(True)
self.qapp.processEvents()
self.assertFalse(self.statsTable.isRowHidden(0))
curve1.setVisible(False)
self.qapp.processEvents()
self.assertTrue(self.statsTable.isRowHidden(1))
tableItems = self.statsTable._itemToTableItems(curve2)
curve2_min = tableItems['min'].text()
self.assertTrue(float(curve2_min) == -2.)
curve0.setVisible(False)
curve1.setVisible(False)
curve2.setVisible(False)
self.qapp.processEvents()
self.assertTrue(self.statsTable.isRowHidden(0))
self.assertTrue(self.statsTable.isRowHidden(1))
self.assertTrue(self.statsTable.isRowHidden(2))
class TestStatsWidgetWithImages(TestCaseQt):
"""Basic test for StatsWidget with images"""
IMAGE_LEGEND = 'test image'
def setUp(self):
TestCaseQt.setUp(self)
self.plot = Plot2D()
self.plot.addImage(data=numpy.arange(128*128).reshape(128, 128),
legend=self.IMAGE_LEGEND, replace=False)
self.widget = StatsWidget.StatsTable(plot=self.plot)
mystats = statshandler.StatsHandler((
(stats.StatMin(), statshandler.StatFormatter()),
(stats.StatCoordMin(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
(stats.StatMax(), statshandler.StatFormatter()),
(stats.StatCoordMax(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
(stats.StatDelta(), statshandler.StatFormatter()),
('std', numpy.std),
('mean', numpy.mean),
(stats.StatCOM(), statshandler.StatFormatter(None))
))
self.widget.setStats(mystats)
def tearDown(self):
self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot.close()
self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
self.widget.close()
self.widget = None
self.plot = None
TestCaseQt.tearDown(self)
def test(self):
image = self.plot._getItem(
kind='image', legend=self.IMAGE_LEGEND)
tableItems = self.widget._itemToTableItems(image)
maxText = '{0:.3f}'.format((128 * 128) - 1)
self.assertEqual(tableItems['legend'].text(), self.IMAGE_LEGEND)
self.assertEqual(tableItems['min'].text(), '0.000')
self.assertEqual(tableItems['max'].text(), maxText)
self.assertEqual(tableItems['delta'].text(), maxText)
self.assertEqual(tableItems['coords min'].text(), '0.0, 0.0')
self.assertEqual(tableItems['coords max'].text(), '127.0, 127.0')
def testItemHidden(self):
"""Test if an item is hide, then the associated stats item is also
hide"""
self.widget.show()
self.plot.show()
self.qWaitForWindowExposed(self.widget)
self.assertFalse(self.widget.isRowHidden(0))
self.plot.getImage(self.IMAGE_LEGEND).setVisible(False)
self.qapp.processEvents()
self.assertTrue(self.widget.isRowHidden(0))
class TestStatsWidgetWithScatters(TestCaseQt):
SCATTER_LEGEND = 'scatter plot'
def setUp(self):
TestCaseQt.setUp(self)
self.scatterPlot = Plot2D()
self.scatterPlot.addScatter([0, 1, 2, 20, 50, 60],
[2, 3, 4, 26, 69, 6],
[5, 6, 7, 10, 90, 20],
legend=self.SCATTER_LEGEND)
self.widget = StatsWidget.StatsTable(plot=self.scatterPlot)
mystats = statshandler.StatsHandler((
stats.StatMin(),
(stats.StatCoordMin(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
stats.StatMax(),
(stats.StatCoordMax(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
stats.StatDelta(),
('std', numpy.std),
('mean', numpy.mean),
stats.StatCOM()
))
self.widget.setStats(mystats)
def tearDown(self):
self.scatterPlot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.scatterPlot.close()
self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
self.widget.close()
self.widget = None
self.scatterPlot = None
TestCaseQt.tearDown(self)
def testStats(self):
scatter = self.scatterPlot._getItem(
kind='scatter', legend=self.SCATTER_LEGEND)
tableItems = self.widget._itemToTableItems(scatter)
self.assertEqual(tableItems['legend'].text(), self.SCATTER_LEGEND)
self.assertEqual(tableItems['min'].text(), '5')
self.assertEqual(tableItems['coords min'].text(), '0, 2')
self.assertEqual(tableItems['max'].text(), '90')
self.assertEqual(tableItems['coords max'].text(), '50, 69')
self.assertEqual(tableItems['delta'].text(), '85')
class TestEmptyStatsWidget(TestCaseQt):
def test(self):
widget = StatsWidget.StatsWidget()
widget.show()
self.qWaitForWindowExposed(widget)
# skip unit test for PyQt4 because some widgets are not realized,
# for no apparent reason
@unittest.skipIf(qt.qVersion().split('.')[0] == '4', reason='PyQt4 not tested')
class TestLineWidget(TestCaseQt):
"""Some test for the StatsLineWidget."""
def setUp(self):
TestCaseQt.setUp(self)
mystats = statshandler.StatsHandler((
(stats.StatMin(), statshandler.StatFormatter()),
))
self.plot = Plot1D()
self.plot.show()
self.x = range(20)
self.y0 = range(20)
self.curve0 = self.plot.addCurve(self.x, self.y0, legend='curve0')
self.y1 = range(12, 32)
self.plot.addCurve(self.x, self.y1, legend='curve1')
self.y2 = range(-2, 18)
self.plot.addCurve(self.x, self.y2, legend='curve2')
self.widget = StatsWidget.BasicGridStatsWidget(plot=self.plot,
kind='curve',
stats=mystats)
def tearDown(self):
self.qapp.processEvents()
self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot.close()
self.widget.setPlot(None)
self.widget._lineStatsWidget._statQlineEdit.clear()
self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
self.widget.close()
self.widget = None
self.plot = None
TestCaseQt.tearDown(self)
def testProcessing(self):
self.widget._lineStatsWidget.setStatsOnVisibleData(False)
self.qapp.processEvents()
self.plot.setActiveCurve(legend='curve0')
self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '0.000')
self.plot.setActiveCurve(legend='curve1')
self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '12.000')
self.plot.getXAxis().setLimitsConstraints(minPos=2, maxPos=5)
self.widget.setStatsOnVisibleData(True)
self.qapp.processEvents()
self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '14.000')
self.plot.setActiveCurve(None)
self.assertTrue(self.plot.getActiveCurve() is None)
self.widget.setStatsOnVisibleData(False)
self.qapp.processEvents()
self.assertFalse(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '14.000')
self.widget.setKind('image')
self.plot.addImage(numpy.arange(100*100).reshape(100, 100) + 0.312)
self.qapp.processEvents()
self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '0.312')
def testUpdateMode(self):
"""Make sure the update modes are well take into account"""
self.plot.setActiveCurve(self.curve0)
_autoRB = self.widget._options._autoRB
_manualRB = self.widget._options._manualRB
# test from api
self.widget.setUpdateMode(StatsWidget.UpdateMode.AUTO)
self.assertTrue(_autoRB.isChecked())
self.assertFalse(_manualRB.isChecked())
# check stats change in auto mode
curve0_min = self.widget._lineStatsWidget._statQlineEdit['min'].text()
new_y = numpy.array(self.y0)
#!/usr/bin/env python
"""Tests for `paired_data.py`."""
from __future__ import division, print_function
import unittest
import numpy as np
import SHS_data
import util
from cover_id import paired_data
class Test_get_pairs(unittest.TestCase):
"""Tests for `get_pairs'."""
def setUp(self):
ratio = (1,9,90)
clique_dict, self.cliques_by_uri = SHS_data.read_cliques()
self.train_cliques, _, _ = util.split_train_test_validation(clique_dict,
ratio=ratio)
def test_dimensions(self):
"""Are `pairs` and `non_pairs` the same length, and all
pairs and non-pairs length-2?
And are all pairs (and no non-pairs) indeed from the same clique?
"""
pairs, non_pairs = paired_data.get_pairs(self.train_cliques)
self.assertEqual(len(pairs), len(non_pairs))
self.assertTrue(np.all([len(pair) == 2 for pair in pairs]))
self.assertTrue(np.all([len(non_pair) == 2 for non_pair in non_pairs]))
self.assertTrue(np.all([self.cliques_by_uri[pair[0]] ==
self.cliques_by_uri[pair[1]]
for pair in pairs]))
self.assertTrue(not np.any([self.cliques_by_uri[non_pair[0]] ==
self.cliques_by_uri[non_pair[1]]
for non_pair in non_pairs]))
class Test_patchwork(unittest.TestCase):
"""Tests for `patchwork'."""
def setUp(self):
ratio = (1,9,90)
clique_dict, _ = SHS_data.read_cliques()
train_cliques, _, _ = util.split_train_test_validation(clique_dict,
ratio=ratio)
self.pairs, self.non_pairs = paired_data.get_pairs(train_cliques)
def test_artificial_data(self):
"""Do the patchworks share first and last frame with chroma?
And do they have the correct size?"""
n_patches, patch_len = 3, 10
for len_x in range(1, 2 * n_patches * patch_len):
chroma = np.random.rand(len_x, 12)
patches = paired_data.patchwork(chroma, n_patches=n_patches, patch_len=patch_len)
self.assertTrue(np.allclose(patches[0], chroma[0]))
self.assertTrue(np.allclose(patches[-1], chroma[-1]))
self.assertEqual(len(patches), n_patches * patch_len, msg='len_x = {}'.format(len_x))
def test_real_data(self):
"""Do the patchworks share first and last frame with chroma?
Do patchworks for a given pair have the same size?
"""
test_pair = self.pairs[0]
chroma_1 = SHS_data.read_chroma(test_pair[0])
chroma_2 = SHS_data.read_chroma(test_pair[1])
patches_1 = paired_data.patchwork(chroma_1)
patches_2 = paired_data.patchwork(chroma_2)
test_non_pair = self.non_pairs[0]
chroma_3 = SHS_data.read_chroma(test_non_pair[0])
chroma_4 = SHS_data.read_chroma(test_non_pair[1])
patches_3 = paired_data.patchwork(chroma_3)
patches_4 = paired_data.patchwork(chroma_4)
self.assertTrue(np.all(patches_1[0] == chroma_1[0]))
self.assertTrue(np.all(patches_1[-1] == chroma_1[-1]))
self.assertTrue(np.all(patches_2[0] == chroma_2[0]))
import warnings
import numpy as np
import numpy.testing as npt
from dipy.data import get_fnames
from dipy.core.gradients import (gradient_table, GradientTable,
gradient_table_from_bvals_bvecs,
gradient_table_from_qvals_bvecs,
gradient_table_from_gradient_strength_bvecs,
WATER_GYROMAGNETIC_RATIO,
reorient_bvecs, generate_bvecs,
check_multi_b)
from dipy.io.gradients import read_bvals_bvecs
def test_btable_prepare():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
# bt.info
fimg, fbvals, fbvecs = get_fnames('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
bvecs = np.where(np.isnan(bvecs), 0, bvecs)
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
bt2 = gradient_table(bvals, bvecs.T)
npt.assert_array_equal(bt2.bvecs, bvecs)
btab = np.concatenate((bvals[:, None], bvecs), axis=1)
bt3 = gradient_table(btab)
npt.assert_array_equal(bt3.bvecs, bvecs)
npt.assert_array_equal(bt3.bvals, bvals)
bt4 = gradient_table(btab.T)
npt.assert_array_equal(bt4.bvecs, bvecs)
npt.assert_array_equal(bt4.bvals, bvals)
# Test for proper inputs (expects either bvals/bvecs or 4 by n):
npt.assert_raises(ValueError, gradient_table, bvecs)
def test_GradientTable():
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
expected_bvals = np.array([0, 1, 1, 5, 13])
expected_b0s_mask = expected_bvals == 0
expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]
gt = GradientTable(gradients, b0_threshold=0)
npt.assert_array_almost_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
npt.assert_array_almost_equal(gt.gradients, gradients)
gt = GradientTable(gradients, b0_threshold=1)
npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
npt.assert_array_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.bvecs, expected_bvecs)
# checks negative values in gtab
npt.assert_raises(ValueError, GradientTable, -1)
npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))
npt.assert_raises(ValueError, GradientTable, np.ones((6,)))
with warnings.catch_warnings(record=True) as w:
bad_gt = gradient_table(expected_bvals, expected_bvecs,
b0_threshold=200)
assert len(w) == 1
def test_gradient_table_from_qvals_bvecs():
qvals = 30. * np.ones(7)
big_delta = .03 # pulse separation of 30ms
small_delta = 0.01 # pulse duration of 10ms
qvals[0] = 0
sq2 = np.sqrt(2) / 2
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_qvals_bvecs(qvals, bvecs,
big_delta, small_delta)
bvals_expected = (qvals * 2 * np.pi) ** 2 * (big_delta - small_delta / 3.)
gradient_strength_expected = qvals * 2 * np.pi /\
(small_delta * WATER_GYROMAGNETIC_RATIO)
npt.assert_almost_equal(gt.gradient_strength, gradient_strength_expected)
npt.assert_almost_equal(gt.bvals, bvals_expected)
def test_gradient_table_from_gradient_strength_bvecs():
gradient_strength = .03e-3 * np.ones(7) # clinical strength at 30 mT/m
big_delta = .03 # pulse separation of 30ms
small_delta = 0.01 # pulse duration of 10ms
gradient_strength[0] = 0
sq2 = np.sqrt(2) / 2
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_gradient_strength_bvecs(gradient_strength, bvecs,
big_delta, small_delta)
qvals_expected = (gradient_strength * WATER_GYROMAGNETIC_RATIO *
small_delta / (2 * np.pi))
bvals_expected = (qvals_expected * 2 * np.pi) ** 2 *\
(big_delta - small_delta / 3.)
npt.assert_almost_equal(gt.qvals, qvals_expected)
npt.assert_almost_equal(gt.bvals, bvals_expected)
def test_gradient_table_from_bvals_bvecs():
sq2 = np.sqrt(2) / 2
bvals = [0, 1, 2, 3, 4, 5, 6, 0]
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.gradients, np.reshape(bvals, (-1, 1)) * bvecs)
npt.assert_array_equal(gt.b0s_mask, [1, 0, 0, 0, 0, 0, 0, 1])
# Test nans are replaced by 0
new_bvecs = bvecs.copy()
new_bvecs[[0, -1]] = np.nan
gt = gradient_table_from_bvals_bvecs(bvals, new_bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
# Bvalue > 0 for non-unit vector
bad_bvals = [2, 1, 2, 3, 4, 5, 6, 0]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# num_grad inconsistent between bvals and bvecs
bad_bvals = np.ones(7)
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# negative bvals
bad_bvals = [-1, -1, -1, -5, -6, -10]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvals not 1d
bad_bvals = np.ones((1, 8))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvec not 2d
bad_bvecs = np.ones((1, 8, 3))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvec not (N, 3)
bad_bvecs = np.ones((8, 2))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvecs not unit vectors
bad_bvecs = bvecs * 2
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# Test **kargs get passed along
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0,
big_delta=5, small_delta=2)
npt.assert_equal(gt.big_delta, 5)
npt.assert_equal(gt.small_delta, 2)
def test_b0s():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(8)
bvals[0] = 0
bvals[7] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(np.where(bt.b0s_mask > 0)[0], np.array([0, 7]))
npt.assert_array_equal(np.where(bt.b0s_mask == 0)[0], np.arange(1, 7))
def test_gtable_from_files():
fimg, fbvals, fbvecs = get_fnames('small_101D')
gt = gradient_table(fbvals, fbvecs)
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.bvecs, bvecs)
def test_deltas():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs, big_delta=5, small_delta=2)
npt.assert_equal(bt.big_delta, 5)
npt.assert_equal(bt.small_delta, 2)
def test_qvalues():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
qvals = np.sqrt(bvals / 6) / (2 * np.pi)
bt = gradient_table(bvals, bvecs, big_delta=8, small_delta=6)
npt.assert_almost_equal(bt.qvals, qvals)
def test_reorient_bvecs():
sq2 = np.sqrt(2) / 2.
__all__ = ["termReinit"]
import copy
import numpy as np
from LevelSetPy.Utilities import *
def termReinit(t, y, schemeData):
"""
termReinit: a Godunov solver for the reinitialization HJ PDE.
[ ydot, stepBound, schemeData ] = termReinit(t, y, schemeData)
Computes a Godunov approximation to motion by the reinitialization
equation. While the reinitialization equation is a general nonlinear HJ
PDE, such a Godunov approximation is the least dissipative monotone
approximation (less dissipative than Roe-Fix or Lax-Friedrichs). The
reinitialization equation is
D_t \phi = -sign(\phi_0)(\|\grad \phi\| - 1).
where phi_0 is the initial conditions. Solving the reinitialization
equation turns an implicit surface function into a signed distance
function. It is iterative, and often slower than a fast marching method
however, it can use high order approximations and can start directly from
the implicit surface function without needing to explicitly find the
implicit surface (although the subcell fix discussed below does in some
sense find the surface).
The term and concept of reinitialization was originally proposed by Chopp
in 1993, but the reinitialization PDE used here comes from
<NAME>, <NAME> & <NAME>, "A Level Set Method for Computing
Solutions to Incompressible Two Phase Flow," J. Computational Physics,
v. 119, pp. 145-159 (1994).
It is discussed in O&F chapter 7.4. The translation between the
parameters of this function and the notation in O&F,
data = y \phi, reshaped to vector form.
derivFunc Function to calculate phi_i^+-.
initial \phi_0
delta = ydot -S(\phi_0)(|\grad \phi| - 1)
The Gudonov solver used below comes from from appendix A.3 of
<NAME>, <NAME>, <NAME> & <NAME>, "A Non-Oscillatory
Eulerian Approach to Interfaces in Multimaterial Flows (the Ghost
Fluid Method)," J. Computational Physics, v. 152, pp. 457-492 (1999).
The subcell fix option is implemented based on
<NAME> & <NAME>, "A Remark on Computing Distance
Functions," J. Computational Physics, v. 163, pp. 51-67 (2000),
doi:10.1006/jcph.2000.6553
Note that Russo & Smereka is based on the schemes from Sussman, Smereka &
Osher and Sussman & Fatemi (1999), which may not be identical to Fedkiw,
Aslam, Merriman & Osher.
The smoothed sgn() approximation is given in the subfunction smearedSign.
Note that if the subcell fix is applied, the smoothed sgn() function is
not used (although a similar smoothing is implicitly applied).
Input Parameters:
t: Time at beginning of timestep.
y: Data array in vector form.
schemeData: A structure (see below).
Output Parameters:
ydot: Change in the data array, in vector form.
stepBound: CFL bound on timestep for stability.
schemeData: The same as the input argument (unmodified).
The parameter schemeData is a structure containing data specific to this
type of term approximation. For this function it contains the field(s)
.grid: Grid structure (see processGrid.m for details).
.derivFunc: Function handle to upwinded finite difference derivative
approximation.
.initial: Array, same size as data. Initial implicit surface function
(used to determine on which side of surface node should lie).
Consequently, on the first reinitialization timestep, this array should
be equal to data.
.subcell_fix_order: Integer. Specifies whether to apply the subcell fix
from Russo & Smereka to nodes near the interface, and if the fix is
applied specifies what order of accuracy to use. Specify order 0 to
turn off the fix. At present, only orders 0 and 1 are supported.
Optional. Default = 1.
The schemeData structure may contain addition fields at the user's
discretion.
For evolving vector level sets, y may be a cell vector. If y is a cell
vector, schemeData may be a cell vector of equal length. In this case all
the elements of y (and schemeData if necessary) are ignored except the
first.
Copyright 2007 <NAME> (<EMAIL>).
This software is used, copied and distributed under the licensing
agreement contained in the file LICENSE in the top directory of
the distribution.
<NAME> 5/27/03
Calling parameters significantly modified, <NAME> 2/13/04.
Updated to handle vector level sets. <NAME> 11/23/04.
Include the subcell fix and edit the help entry. <NAME> 5/5/07.
<NAME>, 08/21/21
"""
#The subcell fix has some options.
# Use the robust signed distance function (17) or the simple one (13)?
# The simple one often fails due to divide by zero errors, so be careful.
robust_subcell = 1
# Small positive parameter that appears in the robust version. In
# fact, we will use this as a relative value with respect to grid.dx
robust_small_epsilon = 1e6 * eps
#For vector level sets, ignore all the other elements.
if iscell(schemeData):
thisSchemeData = schemeData[0]
else:
thisSchemeData = schemeData
# Check for required fields.
assert isfield(thisSchemeData, 'grid'), "grid not in schemeData"
assert isfield(thisSchemeData, 'derivFunc'), "derivFunc not in schemeData"
assert isfield(thisSchemeData, 'initial'), "initial not in schemeData"
grid = thisSchemeData.grid
if iscell(y):
data = y[0].reshape(grid.shape, order='F')
else:
data = y.reshape(grid.shape, order='F')
if isfield(thisSchemeData, 'subcell_fix_order'):
if thisSchemeData.subcell_fix_order==0:
apply_subcell_fix = 0
elif thisSchemeData.subcell_fix_order== 1:
apply_subcell_fix = 1
subcell_fix_order = 1
else:
error(f'Reinit subcell fix order of accuracy {thisSchemeData.subcell_fix_order} not supported')
else:
#Default behavior is to apply the simplest subcell fix.
apply_subcell_fix = 1
subcell_fix_order = 1
if apply_subcell_fix:
# The sign function is only used far from the interface, so we do
# not need to smooth it.
S = np.sign(thisSchemeData.initial)
else:
# Smearing factor for the smooth approximation of the sign function.
dx = np.max(grid.dx)
sgnFactor = dx**2
# Sign function (smeared) identifies on which side of surface each node
# lies.
S = smearedSign(grid, thisSchemeData.initial, sgnFactor)
"""
Compute Godunov derivative approximation for each dimension. This
code is used for the PDE far from the interface, or for all nodes if
the subcell fix is not applied.
"""
deriv = cell(grid.dim, 1)
for i in range(grid.dim):
derivL, derivR = thisSchemeData.derivFunc(grid, data, i)
# For Gudunov's method, check characteristic directions
# according to left and right derivative approximations.
# Both directions agree that flow is to the left.
flowL = np.logical_and(S * derivR <= 0, S * derivL <= 0)
# Both directions agree that flow is to the right.
flowR = np.logical_and(S * derivR >= 0, S * derivL >= 0)
# Diverging flow entropy condition requires choosing deriv = 0
# (so we don't actually have to calculate this term).
#flow0 = ((S * derivR > 0) and (S * derivL < 0))
# Converging flow, need to check which direction arrives first.
flows = np.logical_and(S * derivR < 0, S * derivL > 0)
if(np.any(flows.flatten())):
conv = np.where(flows)
s = zeros(size(flows))
s[conv] = (np.abs(derivR[conv]) - np.abs(derivL[conv])) / (derivR[conv] - derivL[conv])
# If s == 0, both directions arrive at the same time.
# Assuming continuity, both will produce same result, so pick one.
flowL[conv] = np.logical_or(flowL[conv], s[conv] < 0)
flowR[conv] = np.logical_or(flowR[conv], s[conv] >= 0)
deriv[i] = derivL * flowR + derivR * flowL
# Compute magnitude of gradient.
mag = zeros(size(grid.xs[0]))
for i in range(grid.dim):
mag += deriv[i]**2
mag = np.maximum(np.sqrt(mag), eps)
# Start with constant term in the reinitialization equation.
delta = -S
# Compute change in function and bound on step size.
stepBoundInv = 0
for i in range(grid.dim):
# Effective velocity field (for timestep bounding).
v = S * deriv[i]/mag
# Update just like a velocity field.
delta += v * deriv[i]
# CFL condition using effective velocity.
stepBoundInv = stepBoundInv + np.max(np.abs(v[:])) / grid.dx[i]
if apply_subcell_fix:
if subcell_fix_order==1:
# Most of the effort below -- specifically computation of the distance to
# the interface D -- depends only on thisSchemeData.initial, so
# recomputation could be avoided if there were some easy way to
# memoize the results between timesteps. It could be done by
# modifying schemeData, but that has a rather high overhead and could
# lead to bugs if the user fiddles with schemeData. So for now, we
# recompute at each timestep.
# Set up some index cell vectors. No ghost cells will be used, since
# nodes near the edge of the computational domain should not be
# near the interface. Where necessary, we will modify the stencil
# near the edge of the domain.
indexL = cell(grid.dim, 1)
for d in range(grid.dim):
indexL[d] = quickarray(0, grid.N[d])
indexR = copy.deepcopy(indexL)  # independent copy; stencils are modified per dimension below
# Compute denominator in (13) or (16) or (23). Note that we
# have moved the delta x term into this denominator to treat
# the case when delta x is not the same in each dimension.
denom = zeros(size(data))
for d in range(grid.dim):
dx_inv = 1 / grid.dx[d]
# Long difference used in (13) and (23). For the nodes near the
# edge of the computational domain, we will just use short differences.
indexL[d] = [0] + quickarray(0, grid.N[d] - 1)
indexR[d] = quickarray(1, grid.N[d]) + [grid.N[d] - 1]
diff2 = (0.5 * dx_inv * (thisSchemeData.initial[indexR[:]] - thisSchemeData.initial[indexL[:]])) ** 2
if robust_subcell:
# Need the short differences.
indexL[d] = quickarray(0,grid.N[d] - 1)
indexR[d] = quickarray(1,grid.N[d])
short_diff2 = (dx_inv * (thisSchemeData.initial[indexR[:]] - thisSchemeData.initial[indexL[:]])) ** 2
# All the various terms of (17).
diff2[indexL[:]] = np.maximum(diff2[indexL[:]], short_diff2)
diff2[indexR[:]] = np.maximum(diff2[indexR[:]], short_diff2)
diff2 = np.maximum(diff2, robust_small_epsilon ** 2)
# Include this dimension's contribution to the distance.
denom += diff2
# Reset the index vectors.
indexL[d] = quickarray(0, grid.N[d])
indexR[d] = quickarray(0, grid.N[d])
denom = np.sqrt(denom)
# Complete (13) or (16) or (23). Note that delta x was already included
# in the denominator calculation above, so it does not appear.
D = thisSchemeData.initial / denom
# We do need to know which nodes are near the interface.
near = isNearInterface(thisSchemeData.initial)
# Adjust the update. The delta x that appears in (15) or (22)
# comes from the smoothing in (14), so we choose the maximum
# delta x in this case (guarantees sufficient smoothing no
# matter what the direction of the interface). For grids with
# different delta x, this choice may require more
# reinitialization steps to achieve desired results.
delta = (delta * np.logical_not(near) + (S * np.abs(data) - D) / np.max(grid.dx) * near)
# We will not adjust the CFL step bound. By Russo & Smereka, the
# adjusted update has a bound of 1, and the regular scheme above should
# already have that same upper bound.
else:
error(f'Reinit subcell fix order of accuracy {subcell_fix_order} not supported')
stepBound = 1 / stepBoundInv
# Reshape output into vector format and negate for RHS of ODE.
ydot = expand(-delta.flatten(order='F'), 1)
return ydot, stepBound, schemeData
def smearedSign(grid, data, sgnFactor):
"""
s = smearedSign(grid, data, sgnFactor)
Helper function to generate a smeared signum function.
This version (with sgnFactor = dx**2) is (7.5) in O&F chapter 7.4.
"""
s = data / np.sqrt(data**2 + sgnFactor)
return s
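# Sketch of intended use (assumptions: a Bundle-style container with attribute
# access and an upwind derivative function such as ENO2, both following the
# LevelSetPy conventions this module imports):
#   schemeData = Bundle(dict(grid=g, derivFunc=upwindFirstENO2,
#                            initial=data0, subcell_fix_order=1))
#   ydot, stepBound, _ = termReinit(0.0, data0.flatten(order='F'), schemeData)
# One forward-Euler reinitialization step would then be
#   data1 = data0 + stepBound * ydot.reshape(g.shape, order='F')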
import numpy as np
import numpy.linalg as linalg
import rospy
import scipy.integrate
from scipy.spatial.transform import Rotation as rot_math
from visualization_msgs.msg import MarkerArray, Marker
from nav_msgs.msg import Odometry, Path
from geometry_msgs.msg import PoseStamped
from rotor_tm_msgs.msg import RPMCommand, FMCommand
from rotor_tm_utils import utilslib, rosutilslib
from rotor_tm_utils.vee import vee
from rotor_tm_utils import utilslib
import time
def ptmassslackToTaut(t, x):
# DESCRIPTION:
# event function for point mass scenario dynamics
# if event is reached by ivp solver, it will switch from slack to taut, takes in t (time) and state (x)
# INPUTS:
# t - time
# x - state of the point mass system. Specifically,
# x is a 26 by 1 ndarray,
# Name Last Element Location (counting from 1)
# x = np.array([ppx, ppy, ppz, # payload position 3
# pvx, pvy, pvz, # payload velocity 6
# pu, pi, pj, pk, # payload quat 10
# pwx, pwy, pwz, # payload omega 13
# qpx, qpy, qpz, # quad rotor position 16
# qvx, qvy, qvz, # quad rotor velocity 19
# qu, qi, qj, qk, # quad rotor quat 23
# qwx, qwy, qwz]) # quad rotor omega 26
# OUTPUTS:
# value - a float that determines taut condition
value = np.linalg.norm(x[0:3] - x[13:16]) - ptmassslackToTaut.cable_length
return value
def ptmasstautToSlack(t, x):
# DESCRIPTION:
# event function for point mass scenario dynamics
# if event is reached by ivp solver, it will switch from taut to slack, takes in t (time) and state (x)
# INPUTS:
# t - time
# x - state of the point mass system. Specifically,
# x is a 26 by 1 ndarray,
# Name Last Element Location (counting from 1)
# x = np.array([ppx, ppy, ppz, # payload position 3
# pvx, pvy, pvz, # payload velocity 6
# pu, pi, pj, pk, # payload quat 10
# pwx, pwy, pwz, # payload omega 13
# qpx, qpy, qpz, # quad rotor position 16
# qvx, qvy, qvz, # quad rotor velocity 19
# qu, qi, qj, qk, # quad rotor quat 23
# qwx, qwy, qwz]) # quad rotor omega 26
# OUTPUTS:
# value - a float that determines slack condition
value = np.linalg.norm(x[0:3] - x[13:16]) - ptmasstautToSlack.cable_length + 0.000001
return value
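# Sketch of assumed usage: scipy's solve_ivp reads the `terminal` and
# `direction` attributes off the event callable, and this module stores the
# cable length on the function object itself, e.g.
#   ptmassslackToTaut.cable_length = cable_len
#   ptmassslackToTaut.terminal = True # stop integration at the event
#   ptmassslackToTaut.direction = 1 # fire only on the slack-to-taut crossing
#   sol = scipy.integrate.solve_ivp(eom, t_span, x, events=ptmassslackToTaut)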
def cooperativeGuard(t, x, nquad, slack_condition, rho_vec_list, cable_length, id):
# DESCRIPTION:
# Event function event function for cooperative dynamics
# Each MAV in the simulation will have its own event function
# This function will be called under a lambda handle
# INPUTS:
# t - time
# x - (13 + 13*nquad) x 1,
# state vector = [xL, yL, zL, xLd, yLd, zLd,
# qLw, qLx, qLy, qLz, pL, qL, rL,
# [xQ, yQ, zQ, xQd, yQd, zQd]_i, i = 1,...,nquad
# [qw, qx, qy, qz, pQ, qQ, rQ]_i, i = 1,...,nquad
# nquad - number of quads
# slack_condition - a size nquad array, denoting cable condition for each MAV
# rho_vec_list - a nquad by nquad matrix, denoting mounting position for each MAV
# cable_length - array of cable lengths, one per MAV
# id - a scalar, denoting the current MAV number
# OUTPUTS:
# value - (attach-point-to-robot distance) - cable_length, for the cable of MAV `id`
# find the idx of the cables that are slack
idx = np.arange(1, nquad+1)
slack_cable_idx = idx[slack_condition == 1]
taut_cable_idx = idx[slack_condition == 0]
# The rotation matrix of the payload
RotL = utilslib.QuatToRot(x[6:10])
# The attach points' positions correspond to the slack cables.
attach_pts = x[0:3].reshape((3,1)) + RotL @ rho_vec_list
# The quadrotor positions correspond to the slack cables.
slack_quad_pos_idx = 13*slack_cable_idx + np.array([[0],[1],[2]])
taut_quad_pos_idx = 13*taut_cable_idx + np.array([[0],[1],[2]])
# Set up the condition to terminate the integration.
# Detect cable-robot distance = 0
left = np.linalg.norm(x[slack_quad_pos_idx] - attach_pts[:,slack_cable_idx - 1],2,0)-cable_length[slack_cable_idx - 1]
right = np.linalg.norm(x[taut_quad_pos_idx] - attach_pts[:,taut_cable_idx - 1],2,0)-cable_length[taut_cable_idx - 1] + 0.0001
value = np.transpose(np.hstack((left, right)))
return value[id]
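# Sketch of assumed usage: one guard per MAV, bound through a lambda so that
# solve_ivp sees the (t, x) signature it expects:
#   events = [lambda t, x, i=i: cooperativeGuard(t, x, nquad, slack_condition,
#                                                rho_vec_list, cable_length, i)
#             for i in range(nquad)]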
class simulation_base():
def __init__(self,pl_params,uav_params):
rospy.init_node('simulation')
self.rate = 100
rate = rospy.Rate(self.rate)
t_span = (0,1/self.rate)
self.worldframe = "simulator"
################################## init parameters ################################
self.pl_params = pl_params
self.uav_params = uav_params
self.nquad = self.pl_params.nquad
self.pl_dim_num = 13
self.uav_dim_num = 13
self.mav_name = 'dragonfly'
self.sim_start = False
self.last_odom_time_received = 0.0
# Three scenario:
# 1. Cooperative
# 2. Point mass
# 3. Rigid link
# parameter initialization is done separately for each scenario below
# with an if-else branch structure
if self.nquad != 1:
# First Scenario: Cooperative
if self.pl_params.id == "Cable":
print("Initalizing Cooperative Scenario")
self.uav_F = np.matmul(self.pl_params.pseudo_inv_P, np.array([0,0,self.pl_params.mass * self.pl_params.grav,0,0,0])) + np.kron([1]*self.nquad, [0,0,self.uav_params.mass * self.pl_params.grav])
self.uav_F = self.uav_F.reshape(self.nquad, 3)[:,2]
self.uav_M = np.zeros((3,self.nquad))
self.rho_vec_list = self.pl_params.rho_vec_list
self.cable_len_list = np.array(self.pl_params.cable_length)
x = np.zeros(self.pl_dim_num + self.uav_dim_num * self.nquad)
for i in range(self.nquad+1):
if i > 0:
print("initalizing robot ", i)
x[i*13:i*13+3] = x[0:3] + self.rho_vec_list[:,i-1] + np.array([0,0,self.cable_len_list[i-1]])
x[i*13 + 6] = 1
self.cable_is_slack = self.isslack_multi(x[0:3].reshape((3,1))+ utilslib.QuatToRot(x[6:10]) @ self.pl_params.rho_vec_list, x[13*np.arange(1, self.nquad+1)+np.array([[0],[1],[2]])],self.pl_params.cable_length)
# Third Scenario: Rigid link
else:
print("Initalizing Rigidlink Scenario")
self.cable_is_slack = np.zeros(self.nquad) # Dummy assignment (no cable for rigid link)
# TODO how to deal with rho_vec_list (currently hard-coded from rigid_links_mechanism.yaml)
self.cable_len_list = np.zeros((self.nquad, 1), dtype=float)
# x = (26, 1) state init
# s - 13 x 1, state vector = [xL, yL, zL, xLd, yLd, zLd, qw, qx, qy, qz, pL, qL, rL]
# take_in/output - 26 x 1, state vector(dot-elementwise) = [ xL, yL, zL,
# xLd, yLd, zLd,
# Name Element Location
x = np.array([0.0, 0.0, 0.0, # pl pos 3
0.0, 0.0, 0.0, # pl vel 6
1.0, 0.0, 0.0, 0.0, # pl quat 10
0.0, 0.0, 0.0, # pl omega 13
0.0, 0.0, 0.0, # qd pos 16
0.0, 0.0, 0.0, # qd vel 19
1.0, 0.0, 0.0, 0.0, # qd quat 23
0.0, 0.0, 0.0]) # qd omega 26
qd_init = {}
# init uav_F and uav_M
qd_init["pos"] = x[0:3]
qd_init["vel"] = x[3:6]
qd_init["quat"] = x[19:23]
qd_init["omega"] = x[10:13]
qd_init["rot"] = utilslib.QuatToRot(qd_init["quat"]).T
qd_init["pos_des"] = x[0:3]
qd_init["vel_des"] = x[3:6]
qd_init["acc_des"] = np.array([[0],[0],[0]])
qd_init["jrk_des"] = np.array([[0],[0],[0]])
qd_init["qd_yaw_des"] = 0
qd_init["qd_yawdot_des"] = 0
qd_init["quat_des"] = x[19:23]
qd_init["omega_des"] = x[10:13]
robot_trust_moment = self.rigid_links_cooperative_payload_controller_init(qd_init, self.pl_params)
u = self.pl_params.A @ robot_trust_moment
self.uav_F = u[0] * qd_init["rot"][:,2].reshape((3,1))
self.uav_M = u[1:4]
# Second Scenario: Point Mass
else:
print("Initalizing Ptmass Scenario")
## init force to [sum of (quad and payload mass)] * gravity
self.uav_F = np.array([(self.pl_params.mass + self.uav_params.mass) * self.uav_params.grav])
## init M to [0; 0; 0]
self.uav_M = np.zeros((3,self.nquad), dtype=float)
## init cable to taut
self.cable_is_slack = np.zeros(self.nquad)
## init rho_vec_list from the payload params
self.rho_vec_list = self.pl_params.rho_vec_list
## init cable_len_list from the configured cable_length
self.cable_len_list = np.array(self.pl_params.cable_length)
## init state x as a 26-element state vector with the quad's initial position hard-coded to (0, 0, 0.5)
# x = (26, 1) state init
# Name Element Location
x = np.array([0.0, 0.0, 0.0, # pl pos 3
0.0, 0.0, 0.0, # pl vel 6
1.0, 0.0, 0.0, 0.0, # pl quat 10
0.0, 0.0, 0.0, # pl omega 13
0.0, 0.0, 0.5, # qd pos 16
0.0, 0.0, 0.0, # qd vel 19
1.0, 0.0, 0.0, 0.0, # qd quat 23
0.0, 0.0, 0.0]) # qd omega 26
# ROS Publisher
self.system_publisher = rospy.Publisher('system/marker',MarkerArray,queue_size=10)
self.payload_odom_publisher = rospy.Publisher('payload/odom',Odometry,queue_size=1, tcp_nodelay=True)
self.payload_path_publisher = rospy.Publisher('payload/path',Path,queue_size=1, tcp_nodelay=True)
self.payload_path = Path()
self.robot_odom_publisher = []
self.attach_publisher = []
for i in range(self.nquad):
self.robot_odom_publisher.append(rospy.Publisher(self.mav_name + str(i+1) + '/odom',Odometry, queue_size=1, tcp_nodelay=True))
self.attach_publisher.append(rospy.Publisher(self.mav_name + str(i+1) + '/attach',Odometry, queue_size=1, tcp_nodelay=True))
# ROS Subscriber
self.robot_command_subscriber = []
for uav_id in range(self.nquad):
mav_name = self.mav_name + str(uav_id+1)
controller_name = "/controller_" + str(uav_id+1)
self.robot_command_subscriber.append(rospy.Subscriber(controller_name + '/' + mav_name + '/rpm_cmd',RPMCommand,self.rpm_command_callback,uav_id,queue_size=1, tcp_nodelay=True))
self.robot_command_subscriber.append(rospy.Subscriber(controller_name + '/' + mav_name + '/fm_cmd',FMCommand,self.fm_command_callback,uav_id,queue_size=1, tcp_nodelay=True))
# Visualization Init
self.cable_marker_scale = 0.01 * np.ones(3)
self.cable_marker_color = np.array([1.0,0.5,0.5,0.5])
self.uav_marker_scale = 0.5 * np.ones(3)
self.uav_marker_color = np.array([1.0,0.0,0.0,1.0])
self.uav_mesh = self.uav_params.mesh_path
if self.nquad == 1:
self.payload_marker_scale = np.array([0.1,0.1,0.1])
else:
self.payload_marker_scale = np.ones(3)
self.payload_marker_color = np.array([1.0,0.745,0.812,0.941])
self.payload_mesh = self.pl_params.mesh_path
if self.nquad == 1:
self.payload_marker_msg = rosutilslib.init_marker_msg(Marker(),2,0,self.worldframe,self.payload_marker_scale,self.payload_marker_color,self.payload_mesh)
else:
self.payload_marker_msg = rosutilslib.init_marker_msg(Marker(),10,0,self.worldframe,self.payload_marker_scale,self.payload_marker_color,self.payload_mesh)
################################## Simulation Loop ################################
try:
self.hybrid_flag = rospy.get_param("hybrid_switch")
except KeyError:
rospy.set_param("hybrid_switch", True)
self.hybrid_flag = rospy.get_param("hybrid_switch")
if self.hybrid_flag:
print("\n############################################################")
print("HYBRID DYNAMICS IS TURN ON")
print("LOW PERFORMANCE PROCESSOR PROCEDE WITH CAUSTION")
print("############################################################\n")
while not rospy.is_shutdown():
start = time.time()
# Three scenarios:
# 1. Cooperative
# 2. Point mass
# 3. Rigid link
# The dynamics solver is selected separately for each scenario below
# using an if-else branch structure.
# Third Scenario: Rigid Link
if self.pl_params.id == "Rigid Link":
sol = scipy.integrate.solve_ivp(self.rigid_links_cooperative_rigidbody_pl_EOM, t_span, x, method= 'RK45', t_eval=t_span)
x = sol.y[:,-1]
else:
# Second Scenario: Point Mass
if self.nquad == 1:
## first check for inelastic collision
pl_pos = x[0:3]
pl_vel = x[3:6]
robot_pos = x[13:16]
robot_vel = x[16:19]
cable_norm_vel = np.transpose(pl_pos - robot_pos) @ (pl_vel - robot_vel)
## if collision, compute new velocities and assign to state
if cable_norm_vel > 1e-6 and not self.cable_is_slack:
v1, v2 = self.ptmass_inelastic_cable_collision(x[0:6], x[13:19], self.pl_params.mass, self.uav_params.mass)
x[3:6] = v1
x[16:19] = v2
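## For a perfectly inelastic collision, momentum conservation gives the
## shared post-impact velocity along the cable direction:
##     v' = (m_pl * v_pl + m_uav * v_uav) / (m_pl + m_uav)
## (the tangential components are presumably left untouched by
## ptmass_inelastic_cable_collision).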
## set up event for ivp solver
ptmasstautToSlack.terminal = True
ptmassslackToTaut.terminal = True
ptmasstautToSlack.direction = -1
ptmassslackToTaut.direction = 1
ptmassslackToTaut.cable_length = self.pl_params.cable_length
ptmasstautToSlack.cable_length = self.pl_params.cable_length
## state integration
if self.cable_is_slack:
# print("Cable is slack")
sol = scipy.integrate.solve_ivp(self.hybrid_ptmass_pl_transportationEOM, t_span, x, method= 'RK45', t_eval=t_span, events=ptmassslackToTaut)
else:
# print("Cable is taut")
sol = scipy.integrate.solve_ivp(self.hybrid_ptmass_pl_transportationEOM, t_span, x, method= 'RK45', t_eval=t_span, events=ptmasstautToSlack)
## extract state from solver solution
if (np.all(x==sol.y[:, -1])) and (len(sol.y_events[0]) != 0):
x = sol.y_events[0][:]
x = x.T
x = x.reshape((x.shape[0],))
else:
x = sol.y[:,-1]
## recheck cable slack condition
self.cable_is_slack = self.isslack(x[0:3], x[13:16], self.pl_params.cable_length)
# First Scenario: Cooperative
else:
## first, we check for collision
inelastic_collision_flag = self.cooperative_check_inelastic(x)
## make sure velocities are distributed with no new collisions happening
while np.any(inelastic_collision_flag):
# print("collision!")
before_collide_inelastic_collision_flag = inelastic_collision_flag
x = self.rigidbody_quad_inelastic_cable_collision(x, inelastic_collision_flag)
# print("collision finished!")
after_collide_inelastic_collision_flag = self.cooperative_check_inelastic(x)
if np.any((after_collide_inelastic_collision_flag - before_collide_inelastic_collision_flag)>0):
inelastic_collision_flag = after_collide_inelastic_collision_flag + before_collide_inelastic_collision_flag
for i in range(inelastic_collision_flag.shape[0]):
if inelastic_collision_flag[i] != 0:
inelastic_collision_flag[i] = 1.0
else:
inelastic_collision_flag = after_collide_inelastic_collision_flag
## set up event for ivp solver
slack_condition = self.cable_is_slack
idx = np.arange(1, self.pl_params.nquad+1)
slack_cable_idx = idx[slack_condition == 1]
taut_cable_idx = idx[slack_condition == 0]
num_of_slack_cable = np.max(slack_cable_idx.shape)
num_of_taut_cable = np.max(taut_cable_idx.shape)
GuardEvents = []
for i in range(self.nquad):
# bind i per iteration (i=i); a plain closure would capture only the final loop value
GuardEvents.append(lambda t, x, i=i: cooperativeGuard(t, x, GuardEvents[i].pl_params.nquad, GuardEvents[i].slack_condition, GuardEvents[i].pl_params.rho_vec_list, GuardEvents[i].pl_params.cable_length, GuardEvents[i].i))
temp = np.zeros((slack_condition.shape), dtype=float)
for i in range(self.nquad):
if slack_condition[i]:
temp[i] = 1.0
else:
temp[i] = -1.0
id = 0
for fcn in GuardEvents:
fcn.terminal = True
if num_of_slack_cable == 0:
fcn.direction = -1.0
elif num_of_taut_cable == 0:
fcn.direction = 1.0
else:
fcn.direction = temp[id]
fcn.pl_params = self.pl_params
fcn.slack_condition = slack_condition
fcn.i = id
id = id + 1
## state integration
sol = scipy.integrate.solve_ivp(self.hybrid_cooperative_rigidbody_pl_transportationEOM, t_span, x, method='RK45', t_eval=t_span, events=GuardEvents)
## extract state from solver solution
EventTriggered_bool = sol.status
EventTriggered_id = 0
if EventTriggered_bool == 1:
for i in range(self.nquad):
if len(sol.y_events[i]) != 0:
EventTriggered_id = i
if (np.all(x==sol.y[:, -1])):
x = sol.y_events[EventTriggered_id][:]
x = x.T
x = x.reshape((x.shape[0],))
else:
x = sol.y[:,-1]
self.cable_is_slack = self.isslack_multi(x[0:3].reshape((3,1))+ utilslib.QuatToRot(x[6:10]) @ self.pl_params.rho_vec_list, x[13*np.arange(1, self.nquad+1)+np.array([[0],[1],[2]])],self.pl_params.cable_length)
end = time.time()
# Publish payload odometry
current_time = rospy.get_rostime()
payload_odom = Odometry()
payload_odom.header.stamp = current_time
payload_odom.header.frame_id = self.worldframe
payload_rotmat = utilslib.QuatToRot(sol.y[:,0][6:10])
if self.pl_params.mechanism_type == 'Rigid Link':
# for rigid link scenario, the payload position is uav position
self.load_pos = x[0:3].reshape((3,1)) + payload_rotmat @ self.pl_params.rho_load
payload_odom.pose.pose.position.x = self.load_pos[0]
payload_odom.pose.pose.position.y = self.load_pos[1]
payload_odom.pose.pose.position.z = self.load_pos[2]
else:
payload_odom.pose.pose.position.x = x[0]
payload_odom.pose.pose.position.y = x[1]
payload_odom.pose.pose.position.z = x[2]
payload_odom.twist.twist.linear.x = x[3]
payload_odom.twist.twist.linear.y = x[4]
payload_odom.twist.twist.linear.z = x[5]
payload_odom.pose.pose.orientation.w = x[6]
payload_odom.pose.pose.orientation.x = x[7]
payload_odom.pose.pose.orientation.y = x[8]
payload_odom.pose.pose.orientation.z = x[9]
payload_odom.twist.twist.angular.x = x[10]
payload_odom.twist.twist.angular.y = x[11]
payload_odom.twist.twist.angular.z = x[12]
self.payload_odom_publisher.publish(payload_odom)
# Publish payload path
current_time = rospy.get_rostime()
self.payload_path.header.stamp = current_time
self.payload_path.header.frame_id = self.worldframe
payload_rotmat = utilslib.QuatToRot(sol.y[:,0][6:10])
pl_pose_stamped = PoseStamped()
pl_pose_stamped.header.stamp = current_time
pl_pose_stamped.header.frame_id = self.worldframe
if self.pl_params.mechanism_type == 'Rigid Link':
# for rigid link scenario, the payload position is uav position
self.load_pos = x[0:3].reshape((3,1)) + payload_rotmat @ self.pl_params.rho_load
pl_pose_stamped.pose.position.x = self.load_pos[0]
pl_pose_stamped.pose.position.y = self.load_pos[1]
pl_pose_stamped.pose.position.z = self.load_pos[2]
else:
pl_pose_stamped.pose.position.x = x[0]
pl_pose_stamped.pose.position.y = x[1]
pl_pose_stamped.pose.position.z = x[2]
pl_pose_stamped.pose.orientation.w = x[6]
pl_pose_stamped.pose.orientation.x = x[7]
pl_pose_stamped.pose.orientation.y = x[8]
pl_pose_stamped.pose.orientation.z = x[9]
self.payload_path.poses.append(pl_pose_stamped)
self.payload_path_publisher.publish(self.payload_path)
system_marker = MarkerArray()
cable_point_list = np.zeros((2*self.nquad,3))
for uav_id in range(self.nquad):
if self.pl_params.mechanism_type == 'Rigid Link':
uav_state = x[13:26]
attach_pos = self.load_pos.reshape((3,)) + np.matmul(payload_rotmat, (self.pl_params.rho_robot[:,uav_id]+np.array([0.028,0,0.032])))
uav_state[0:3] = attach_pos
attach_vel = uav_state[3:6] + np.matmul(payload_rotmat, np.cross(sol.y[:,0][10:13], self.pl_params.rho_robot[:,uav_id]))
if not self.cable_is_slack[uav_id]:
uav_attach_vector = uav_state[0:3] - attach_pos[0:3]
uav_attach_distance = np.linalg.norm(uav_attach_vector)
if uav_attach_distance > self.cable_len_list[uav_id]:
xi = uav_attach_vector/uav_attach_distance
uav_state[0:3] = attach_pos[0:3] + self.cable_len_list[uav_id] * xi
# Publish UAV odometry
uav_odom = Odometry()
uav_odom.header.stamp = current_time
uav_odom.header.frame_id = self.worldframe
uav_odom.pose.pose.position.x = uav_state[0]
uav_odom.pose.pose.position.y = uav_state[1]
uav_odom.pose.pose.position.z = uav_state[2]
uav_odom.twist.twist.linear.x = uav_state[3]
uav_odom.twist.twist.linear.y = uav_state[4]
uav_odom.twist.twist.linear.z = uav_state[5]
uav_odom.pose.pose.orientation.w = uav_state[6]
uav_odom.pose.pose.orientation.x = uav_state[7]
uav_odom.pose.pose.orientation.y = uav_state[8]
uav_odom.pose.pose.orientation.z = uav_state[9]
uav_odom.twist.twist.angular.x = uav_state[10]
uav_odom.twist.twist.angular.y = uav_state[11]
uav_odom.twist.twist.angular.z = uav_state[12]
self.robot_odom_publisher[uav_id].publish(uav_odom)
# Publish UAV attach odometry
attach_odom = Odometry()
attach_odom.header.stamp = current_time
attach_odom.header.frame_id = self.worldframe
attach_odom.pose.pose.position.x = attach_pos[0]
attach_odom.pose.pose.position.y = attach_pos[1]
attach_odom.pose.pose.position.z = attach_pos[2]
attach_odom.twist.twist.linear.x = attach_vel[0]
attach_odom.twist.twist.linear.y = attach_vel[1]
attach_odom.twist.twist.linear.z = attach_vel[2]
self.attach_publisher[uav_id].publish(attach_odom)
else:
uav_state = x[self.pl_dim_num+self.uav_dim_num*uav_id:self.pl_dim_num+self.uav_dim_num*(uav_id+1)]
attach_pos = x[0:3] + np.matmul(payload_rotmat, self.rho_vec_list[:,uav_id])
attach_vel = x[3:6] + np.matmul(payload_rotmat, np.cross(sol.y[:,0][10:13], self.rho_vec_list[:,uav_id]))
if not self.cable_is_slack[uav_id]:
uav_attach_vector = uav_state[0:3] - attach_pos[0:3]
uav_attach_distance = np.linalg.norm(uav_attach_vector)
if uav_attach_distance > self.cable_len_list[uav_id]:
xi = uav_attach_vector/uav_attach_distance
uav_state[0:3] = attach_pos[0:3] + self.cable_len_list[uav_id] * xi
# Publish UAV odometry
uav_odom = Odometry()
uav_odom.header.stamp = current_time
uav_odom.header.frame_id = self.worldframe
uav_odom.pose.pose.position.x = uav_state[0]
uav_odom.pose.pose.position.y = uav_state[1]
uav_odom.pose.pose.position.z = uav_state[2]
uav_odom.twist.twist.linear.x = uav_state[3]
uav_odom.twist.twist.linear.y = uav_state[4]
uav_odom.twist.twist.linear.z = uav_state[5]
uav_odom.pose.pose.orientation.w = uav_state[6]
uav_odom.pose.pose.orientation.x = uav_state[7]
uav_odom.pose.pose.orientation.y = uav_state[8]
uav_odom.pose.pose.orientation.z = uav_state[9]
uav_odom.twist.twist.angular.x = uav_state[10]
uav_odom.twist.twist.angular.y = uav_state[11]
uav_odom.twist.twist.angular.z = uav_state[12]
self.robot_odom_publisher[uav_id].publish(uav_odom)
# Publish UAV attach odometry
attach_odom = Odometry()
attach_odom.header.stamp = current_time
attach_odom.header.frame_id = self.worldframe
attach_odom.pose.pose.position.x = attach_pos[0]
attach_odom.pose.pose.position.y = attach_pos[1]
attach_odom.pose.pose.position.z = attach_pos[2]
attach_odom.twist.twist.linear.x = attach_vel[0]
attach_odom.twist.twist.linear.y = attach_vel[1]
attach_odom.twist.twist.linear.z = attach_vel[2]
self.attach_publisher[uav_id].publish(attach_odom)
cable_point_list[2*uav_id,:] = uav_state[0:3]
cable_point_list[2*uav_id+1,:] = attach_pos[0:3]
uav_marker_msg = rosutilslib.init_marker_msg(Marker(),10,0,self.worldframe,self.uav_marker_scale,self.uav_marker_color,self.uav_mesh)
uav_marker = rosutilslib.update_marker_msg(uav_marker_msg,uav_state[0:3],uav_state[6:10],uav_id)
system_marker.markers.append(uav_marker)
# Update cable visualization
cable_marker_msg = rosutilslib.init_marker_msg(Marker(),5,0,self.worldframe,self.cable_marker_scale,self.cable_marker_color)
system_marker.markers.append(rosutilslib.update_line_msg(cable_marker_msg,cable_point_list,uav_id + 1))
# Update payload visualization
system_marker.markers.append(rosutilslib.update_marker_msg(self.payload_marker_msg,x[0:3],x[6:10],uav_id+2))
self.system_publisher.publish(system_marker)
rate.sleep()
else:
print("\n############################################################")
print("HYBRID DYNAMICS IS TURN OFF")
print("############################################################\n")
while not rospy.is_shutdown():
start = time.time()
if self.pl_params.id == "Rigid Link":
sol = scipy.integrate.solve_ivp(self.rigid_links_cooperative_rigidbody_pl_EOM, t_span, x, method= 'RK45', t_eval=t_span)
else:
if self.nquad == 1:
sol = scipy.integrate.solve_ivp(self.hybrid_ptmass_pl_transportationEOM, t_span, x, method= 'RK45', t_eval=t_span)
else:
sol = scipy.integrate.solve_ivp(self.hybrid_cooperative_rigidbody_pl_transportationEOM, t_span, x, method='RK45', t_eval=t_span)
end = time.time()
x = sol.y[:,1]
# Publish payload odometry
current_time = rospy.get_rostime()
payload_odom = Odometry()
payload_odom.header.stamp = current_time
payload_odom.header.frame_id = self.worldframe
payload_rotmat = utilslib.QuatToRot(sol.y[:,0][6:10])
if self.pl_params.mechanism_type == 'Rigid Link':
self.load_pos = x[0:3].reshape((3,1)) + payload_rotmat @ self.pl_params.rho_load
payload_odom.pose.pose.position.x = self.load_pos[0]
payload_odom.pose.pose.position.y = self.load_pos[1]
payload_odom.pose.pose.position.z = self.load_pos[2]
else:
payload_odom.pose.pose.position.x = x[0]
payload_odom.pose.pose.position.y = x[1]
payload_odom.pose.pose.position.z = x[2]
payload_odom.twist.twist.linear.x = x[3]
payload_odom.twist.twist.linear.y = x[4]
payload_odom.twist.twist.linear.z = x[5]
payload_odom.pose.pose.orientation.w = x[6]
payload_odom.pose.pose.orientation.x = x[7]
payload_odom.pose.pose.orientation.y = x[8]
payload_odom.pose.pose.orientation.z = x[9]
payload_odom.twist.twist.angular.x = x[10]
payload_odom.twist.twist.angular.y = x[11]
payload_odom.twist.twist.angular.z = x[12]
self.payload_odom_publisher.publish(payload_odom)
# Publish payload path
current_time = rospy.get_rostime()
self.payload_path.header.stamp = current_time
self.payload_path.header.frame_id = self.worldframe
payload_rotmat = utilslib.QuatToRot(sol.y[:,0][6:10])
pl_pose_stamped = PoseStamped()
pl_pose_stamped.header.stamp = current_time
pl_pose_stamped.header.frame_id = self.worldframe
if self.pl_params.mechanism_type == 'Rigid Link':
# for rigid link scenario, the payload position is uav position
self.load_pos = x[0:3].reshape((3,1)) + payload_rotmat @ self.pl_params.rho_load
pl_pose_stamped.pose.position.x = self.load_pos[0]
pl_pose_stamped.pose.position.y = self.load_pos[1]
pl_pose_stamped.pose.position.z = self.load_pos[2]
else:
pl_pose_stamped.pose.position.x = x[0]
pl_pose_stamped.pose.position.y = x[1]
pl_pose_stamped.pose.position.z = x[2]
pl_pose_stamped.pose.orientation.w = x[6]
pl_pose_stamped.pose.orientation.x = x[7]
pl_pose_stamped.pose.orientation.y = x[8]
pl_pose_stamped.pose.orientation.z = x[9]
self.payload_path.poses.append(pl_pose_stamped)
self.payload_path_publisher.publish(self.payload_path)
system_marker = MarkerArray()
cable_point_list = np.zeros((2*self.nquad,3))
for uav_id in range(self.nquad):
if self.pl_params.mechanism_type == 'Rigid Link':
uav_state = x[13:26]
attach_pos = self.load_pos.reshape((3,)) + np.matmul(payload_rotmat, (self.pl_params.rho_robot[:,uav_id]+np.array([0.028,0,0.032])))
uav_state[0:3] = attach_pos
attach_vel = uav_state[3:6] + np.matmul(payload_rotmat, np.cross(sol.y[:,0][10:13], self.pl_params.rho_robot[:,uav_id]))
if not self.cable_is_slack[uav_id]:
uav_attach_vector = uav_state[0:3] - attach_pos[0:3]
uav_attach_distance = np.linalg.norm(uav_attach_vector)
if uav_attach_distance > self.cable_len_list[uav_id]:
xi = uav_attach_vector/uav_attach_distance
uav_state[0:3] = attach_pos[0:3] + self.cable_len_list[uav_id] * xi
# Publish UAV odometry
uav_odom = Odometry()
uav_odom.header.stamp = current_time
uav_odom.header.frame_id = self.worldframe
uav_odom.pose.pose.position.x = uav_state[0]
uav_odom.pose.pose.position.y = uav_state[1]
uav_odom.pose.pose.position.z = uav_state[2]
uav_odom.twist.twist.linear.x = uav_state[3]
uav_odom.twist.twist.linear.y = uav_state[4]
uav_odom.twist.twist.linear.z = uav_state[5]
uav_odom.pose.pose.orientation.w = uav_state[6]
uav_odom.pose.pose.orientation.x = uav_state[7]
uav_odom.pose.pose.orientation.y = uav_state[8]
uav_odom.pose.pose.orientation.z = uav_state[9]
uav_odom.twist.twist.angular.x = uav_state[10]
uav_odom.twist.twist.angular.y = uav_state[11]
uav_odom.twist.twist.angular.z = uav_state[12]
self.robot_odom_publisher[uav_id].publish(uav_odom)
# Publish UAV attach odometry
attach_odom = Odometry()
attach_odom.header.stamp = current_time
attach_odom.header.frame_id = self.worldframe
attach_odom.pose.pose.position.x = attach_pos[0]
attach_odom.pose.pose.position.y = attach_pos[1]
attach_odom.pose.pose.position.z = attach_pos[2]
attach_odom.twist.twist.linear.x = attach_vel[0]
attach_odom.twist.twist.linear.y = attach_vel[1]
attach_odom.twist.twist.linear.z = attach_vel[2]
self.attach_publisher[uav_id].publish(attach_odom)
else:
uav_state = x[self.pl_dim_num+self.uav_dim_num*uav_id:self.pl_dim_num+self.uav_dim_num*(uav_id+1)]
attach_pos = x[0:3] + np.matmul(payload_rotmat, self.rho_vec_list[:,uav_id])
import numpy as np
from scipy.optimize import minimize
from scipy.io import loadmat
from numpy.linalg import det, inv
from math import sqrt, pi
import scipy.io
import matplotlib.pyplot as plt
import pickle
import sys
# Done
def ldaLearn(X,y):
'''
Inputs
X - a N x d matrix with each row corresponding to a training example
y - a N x 1 column vector indicating the labels for each training example
Outputs
means - A d x k matrix containing learnt means for each of the k classes
covmat - A single d x d learnt covariance matrix
'''
labels = np.unique(y)
total_label = labels.shape[0]
total_feature = X.shape[1]
means = np.zeros([total_label,total_feature])
r = 0
for i in labels:
data = X[np.where(y == i)[0],]
m = np.mean(data,axis=0)
means[r,] = m
r +=1
X_transpose = np.transpose(X)
covmat = np.cov(X_transpose)
return means,covmat
# Done
def qdaLearn(X,y):
'''
Inputs
X - a N x d matrix with each row corresponding to a training example
y - a N x 1 column vector indicating the labels for each training example
Outputs
means - A d x k matrix containing learnt means for each of the k classes
covmats - A list of k d x d learnt covariance matrices for each of the k classes
'''
# IMPLEMENT THIS METHOD
covmats = []
labels = np.unique(y)
total_label = labels.shape[0]
total_feature = X.shape[1]
means = np.zeros([total_label,total_feature])
r = 0
for i in labels:
data = X[np.where(y == i)[0],]
m = np.mean(data,axis=0)
means[r,] = m
r +=1
data_transpose = np.transpose(data)
covmats.append(np.cov(data_transpose))
return means,covmats
# Done
def ldaTest(means,covmat,Xtest,ytest):
r = Xtest.shape[0]
c = means.shape[0]
res = np.zeros((r,c))
f = 1/np.sqrt((2*pi)**means.shape[1]*det(covmat))
for j in range(means.shape[0]):
res[:,j] = f * np.exp(-0.5*np.array([np.dot(np.dot((Xtest[i,:] - means[j,:]),inv(covmat)),np.transpose(Xtest[i,:] - means[j,:])) for i in range(Xtest.shape[0])]))
ypred = np.argmax(res,axis=1) + 1
res = (ypred == ytest.ravel())
acc_data = np.where(res)[0]
acc = len(acc_data)
return float(acc)/len(ytest),ypred
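# For reference, the density evaluated in ldaTest/qdaTest is the multivariate
# Gaussian
#     p(x | class j) = (2*pi)^(-d/2) det(Sigma)^(-1/2)
#                      * exp(-0.5 (x - mu_j)^T Sigma^(-1) (x - mu_j)).
# LDA shares a single covariance Sigma across classes (so the normalizer f is
# computed once), while QDA below recomputes it per class covariance.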
# Done
def qdaTest(means,covmats,Xtest,ytest):
res = np.zeros((Xtest.shape[0],means.shape[0]))
for j in range(means.shape[0]):
f = 1/np.sqrt((2*pi)**means.shape[1]*det(covmats[j]))
res[:,j] = f * np.exp(-0.5*np.array([np.dot(np.dot((Xtest[i,:] - means[j,:]),inv(covmats[j])),np.transpose(Xtest[i,:] - means[j,:])) for i in range(Xtest.shape[0])]))
ypred = np.argmax(res,axis=1) + 1
res = (ypred == ytest.ravel())
acc = len(np.where(res)[0])
return float(acc)/len(ytest),ypred
# Done
def learnOLERegression(X,y):
'''
Inputs:
X = N x d (Input data matrix for training)
y = N x 1 (Target vector for training)
Output:
w = d x 1 (Learned weight vector)
'''
# The formula for learning w in OLE : w = Inverse((Xtranspose * X)) * Xtranspose * y
X_transpose = np.transpose(X)
X_X_transpose = np.dot(X_transpose,X)
Inverse_X_X_transpose = np.linalg.inv(X_X_transpose)
w = np.dot(np.dot(Inverse_X_X_transpose,X_transpose),y)
return w
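# Note: explicitly inverting X^T X is numerically fragile when the matrix is
# ill-conditioned. An equivalent, more stable alternative (not what the code
# above does) would be:
#     w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))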
# Done
def learnRidgeRegression(X,y,lambd):
'''
Inputs:
X = N x d (Input data matrix for training)
y = N x 1 (Target vector for training)
lambd = ridge parameter (scalar)
Output:
w = d x 1 (Learned weight vector)
'''
# The formula for learning w in Ridge Regression :
# w = Inverse(( Lamda* Identity(d)) + Xtranspose * X) * Xtranspose * y
I = np.identity(X.shape[1])
lambd_I = np.dot(lambd,I)
X_transpose = np.transpose(X)
X_X_transpose = np.dot(X_transpose,X)
Inverse_part = np.linalg.inv(lambd_I + X_X_transpose)
w = np.dot(np.dot(Inverse_part,X_transpose),y)
return w
# Done
def testOLERegression(w,Xtest,ytest):
'''
Inputs:
w = d x 1
Xtest = N x d
ytest = N x 1
Output:
mse
'''
y_predict = np.dot(Xtest,w)
diff = (ytest - y_predict)
diff_transpose = np.transpose(diff)
inv_N = 1 / len(Xtest)  # averaging factor 1/N
mse = np.dot( np.dot(inv_N,diff_transpose), diff )
return mse
# Done
def regressionObjVal(w, X, y, lambd):
'''
compute squared error (scalar) and gradient of squared error with respect
to w (vector) for the given data X and y and the regularization parameter lambda
'''
# Regularized squared error: 0.5 * ((y - Xw)^T (y - Xw) + lambda * w^T w)
# Gradient with respect to w: -X^T (y - Xw) + lambda * w
w_transpose = np.asmatrix(w).transpose()
X_w_transpose = np.dot(X,w_transpose)
diff = (y - X_w_transpose)
diff_transpose = np.transpose(diff)
diff_diff = (np.dot(diff_transpose,diff))
w_w_transpose = np.dot(np.asmatrix(w),w_transpose)
error = 0.5*(diff_diff + lambd*w_w_transpose)
error_grad = -(np.dot(np.transpose(X),diff)) + lambd*w_transpose
error_grad = np.squeeze(np.array(error_grad))
return error, error_grad
# Done
def mapNonLinear(x,p):
'''
Inputs:
x - a single column vector (N x 1)
p - integer (>= 0)
Outputs:
Xp - (N x (p+1))
'''
Xp = np.zeros((x.shape[0],p+1))
for i in range(p+1):
Xp[:,i] = pow(x,i)
return Xp
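# Example: for x = np.array([2., 3.]) and p = 2, mapNonLinear returns
#     [[1., 2., 4.],
#      [1., 3., 9.]]
# i.e. column i holds x raised to the power i (column 0 is all ones).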
# Main script
# Problem 1
if sys.version_info.major == 2:
X,y,Xtest,ytest = pickle.load(open('sample.pickle','rb'))
else:
X,y,Xtest,ytest = pickle.load(open('sample.pickle','rb'),encoding = 'latin1')
# LDA
means,covmat = ldaLearn(X,y)
ldaacc,ldares = ldaTest(means,covmat,Xtest,ytest)
print('LDA Accuracy = '+str(ldaacc))
# QDA
means,covmats = qdaLearn(X,y)
qdaacc,qdares = qdaTest(means,covmats,Xtest,ytest)
print('QDA Accuracy = '+str(qdaacc))
# plotting boundaries
x1 = np.linspace(-5,20,100)
x2 = np.linspace(-5,20,100)
xx1,xx2 = np.meshgrid(x1,x2)
xx = np.zeros((x1.shape[0]*x2.shape[0],2))
xx[:,0] = xx1.ravel()
xx[:,1] = xx2.ravel()
fig = plt.figure(figsize=[18,9])
plt.subplot(1, 2, 1)
zacc,zldares = ldaTest(means,covmat,xx,np.zeros((xx.shape[0],1)))
plt.contourf(x1,x2,zldares.reshape((x1.shape[0],x2.shape[0])),alpha=0.3)
plt.scatter(Xtest[:,0],Xtest[:,1],c=ytest[:,0])
plt.title('LDA')
plt.subplot(1, 2, 2)
zacc,zqdares = qdaTest(means,covmats,xx,np.zeros((xx.shape[0],1)))
plt.contourf(x1,x2,zqdares.reshape((x1.shape[0],x2.shape[0])),alpha=0.3)
plt.scatter(Xtest[:,0],Xtest[:,1],c=ytest[:,0])
plt.title('QDA')
# Problem 2
if sys.version_info.major == 2:
X,y,Xtest,ytest = pickle.load(open('diabetes.pickle','rb'))
else:
X,y,Xtest,ytest = pickle.load(open('diabetes.pickle','rb'),encoding = 'latin1')
# add intercept
X_i = np.concatenate((np.ones((X.shape[0],1)), X), axis=1)
Xtest_i = np.concatenate((np.ones((Xtest.shape[0],1)), Xtest), axis=1)
w = learnOLERegression(X,y)
mse = testOLERegression(w,Xtest,ytest)
w_i = learnOLERegression(X_i,y)
mse_i = testOLERegression(w_i,Xtest_i,ytest)
print('MSE without intercept '+str(mse))
print('MSE with intercept '+str(mse_i))
# Problem 3
k = 101
lambdas = np.linspace(0, 1, num=k)
i = 0
mses3_train = np.zeros((k,1))
mses3 = np.zeros((k,1))
for lambd in lambdas:
w_l = learnRidgeRegression(X_i,y,lambd)
mses3_train[i] = testOLERegression(w_l,X_i,y)
mses3[i] = testOLERegression(w_l,Xtest_i,ytest)
i = i + 1
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(lambdas,mses3_train)
plt.title('MSE for Train Data')
plt.subplot(1, 2, 2)
plt.plot(lambdas,mses3)
plt.title('MSE for Test Data')
plt.show()
# Problem 4
k = 101
lambdas = np.linspace(0, 1, num=k)
i = 0
mses4_train = np.zeros((k,1))
mses4 = np.zeros((k,1))
opts = {'maxiter' : 20} # Preferred value.
w_init = np.ones((X_i.shape[1],1))
for lambd in lambdas:
args = (X_i, y, lambd)
w_l = minimize(regressionObjVal, w_init, jac=True, args=args,method='CG', options=opts)
w_l = np.transpose(np.array(w_l.x))
w_l = np.reshape(w_l,[len(w_l),1])
mses4_train[i] = testOLERegression(w_l,X_i,y)
mses4[i] = testOLERegression(w_l,Xtest_i,ytest)
i = i + 1
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(lambdas,mses4_train)
plt.plot(lambdas,mses3_train)
plt.title('MSE for Train Data')
plt.legend(['Using scipy.minimize','Direct minimization'])
plt.subplot(1, 2, 2)
plt.plot(lambdas,mses4)
plt.plot(lambdas,mses3)
plt.title('MSE for Test Data')
plt.legend(['Using scipy.minimize','Direct minimization'])
plt.show()
# Problem 5
pmax = 7
lambda_opt = 0.06 # REPLACE THIS WITH lambda_opt estimated from Problem 3
mses5_train = np.zeros((pmax,2))
"""
Free-flyer Gripper Grasping. For model-free RL learning of trajectory to grasp an object.
"""
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import scipy
from scipy.integrate import odeint
def soft_abs(x, alpha=1.0, d=0):
z = np.sqrt(alpha**2 + x**2)
if d == 0:
return z - alpha
if d == 1:
return x/z
if d == 2:
return alpha**2 / z**3
def vector_cross(x,y):
"""
Does cross product of two 3x1 np arrays.
Normal numpy cross product only takes vectors.
"""
assert x.shape[0] == 3
assert y.shape[0] == 3
return np.expand_dims(np.cross(x[:,0],y[:,0]), axis=-1)
def vector_dot(x,y):
"""
Does dot product of two 3x1 np arrays.
Normal numpy dot product only takes vectors.
"""
assert x.shape[0] == 3
assert y.shape[0] == 3
return np.dot(x[:,0],y[:,0])
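# Example (illustrative): with a = np.array([[1.], [0.], [0.]]) and
# b = np.array([[0.], [1.], [0.]]), vector_cross(a, b) is [[0.], [0.], [1.]]
# and vector_dot(a, b) is 0.0.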
def norm_angle(th):
    # wrap by full turns (2*pi); subtracting only pi would change the angle
    while th > math.pi:
        th -= 2 * math.pi
    while th < -math.pi:
        th += 2 * math.pi
    return th
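# Examples: norm_angle(3 * math.pi / 2) == -math.pi / 2 and
# norm_angle(-3 * math.pi / 2) == math.pi / 2; outputs lie in [-pi, pi].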
logger = logging.getLogger(__name__)
class GraspEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 30
}
def __init__(self,costf='simple',randomize_params=False,rand_init=True):
self.s_dim = 12 # state: xs, ys, ths, vxs, vys, vths, xo, yo, tho, vxo, vyo, vtho
self.a_dim = 3
self.costf = 'simple'
self.randomize_params = randomize_params
self.rand_init = rand_init
#spacecraft params:
self.ms = 6700. # SSL-1300 bus
self.Js = 1/12 * 6700 * (5**2 + 5**2) # cube; note ** (power), not ^ (XOR)
self.rs = 2.5
self.Ls = 1.5
#object params:
self.mo_nom = 1973. # Landsat-7 bus
self.Jo_nom = 1/12 * self.mo_nom * (4**2 + 4**2) # cube; note ** (power), not ^ (XOR)
self.ro = 1.5
self.Lo = 1.5
#interface params:
self.kx = 0.5
self.ky = 0.5
self.kth = 0.5
self.dx = 0.2
self.dy = 0.2
self.dth = 0.25
self.dt = 0.1
# Randomization limits
self.panel1_len_nom = 5.
self.panel1_angle_nom = 2*math.pi/3.
self.panel2_len_nom = 5.
self.panel2_angle_nom = -2*math.pi/3.
# State + action bounds
# state: xs, ys, ths, vxs, vys, vths, xo, yo, tho, vxo, vyo, vtho
self.x_upper = 10.
self.x_lower = -10.
self.y_upper = self.x_upper
self.y_lower = self.x_lower
self.v_limit = 0.5 #vel limit for all directions
self.angle_limit = math.pi
self.angle_deriv_limit = math.pi/16.
self.f_upper = 5. # Aerojet Rocketdyne MR-111
self.f_lower = 0.
self.M_lim = 0.075 # Rockwell Collins RSI 4-75
# -- simple cost terms
self.simple_dist_cost = 0.1
self.simple_angle_cost = 0.1
self.simple_ang_vel_cost = 0.05
self.simple_vel_cost = 0.2
self.simple_f1_cost = 0.5
self.simple_f2_cost = 0.5
self.simple_m_cost = 0.7
# --
# I think this is from CM-gripper to CM-object
self.offset_distance = self.rs + self.ro + self.Ls + self.Lo
# define default initial state (note: not used if rand_init=True)
self.start_state = np.zeros(self.s_dim)
self.start_state[0] = -5.
self.start_state[6] = 5.
# define goal region, based on grasping envelope from ICRA 2016 paper
self.goal_eps_norm = 0.2 # contact
self.goal_eps_tan = 0.1 # offset
self.goal_eps_ang = math.pi/4.
self.goal_eps_vel_lower = 0.2
self.goal_eps_vel_upper = 0.8
self.goal_eps_ang_vel = math.pi
high_ob = [self.x_upper,
self.y_upper,
self.angle_limit,
self.v_limit,
self.v_limit,
self.angle_deriv_limit,
self.x_upper,
self.y_upper,
self.angle_limit,
self.v_limit,
self.v_limit,
self.angle_deriv_limit]
low_ob = [self.x_lower,
self.y_lower,
-self.angle_limit,
-self.v_limit,
-self.v_limit,
-self.angle_deriv_limit,
self.x_lower,
self.y_lower,
-self.angle_limit,
-self.v_limit,
-self.v_limit,
-self.angle_deriv_limit]
high_state = high_ob
low_state = low_ob
high_state = np.array(high_state)
low_state = np.array(low_state)
high_obsv = np.array(high_ob)
low_obsv = np.array(low_ob)
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Test Vector Leakage Assessment
Typical usage:
To run the analysis without loading or saving the histograms:
>>> ./tvla.py
To save histograms in the OUTPUT_FILE for later use:
>>> ./tvla.py -o OUTPUT_FILE
To load histograms from the INPUT_FILE
>>> ./tvla.py -i INPUT_FILE
"""
import os
import logging as log
import argparse
import chipwhisperer as cw
from chipwhisperer.analyzer import aes_funcs
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing
from joblib import Parallel, delayed
from pathlib import Path
from scipy.stats import ttest_ind_from_stats
class UnformattedLog(object):
def __init__(self):
self.logger = log.getLogger()
self.formatters = [handler.formatter for handler in self.logger.handlers]
def __enter__(self):
for i in range(len(self.formatters)):
self.logger.handlers[i].setFormatter(log.Formatter())
def __exit__(self, exc_type, exc_value, traceback):
for i in range(len(self.formatters)):
self.logger.handlers[i].setFormatter(self.formatters[i])
def bit_count(int_no):
"""Computes Hamming weight of a number."""
c = 0
while int_no:
int_no &= int_no - 1
c += 1
return c
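# This is Kernighan's trick: each `int_no &= int_no - 1` clears the lowest set
# bit, so the loop runs once per set bit, e.g. bit_count(0b10110) == 3.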
# A set of functions for working with histograms.
# The distributions are stored in two matrices x and y with dimensions (M, N) where:
# - M equals the number of time samples times the number of orders, and
# - N equals the number of values (i.e. the resolution).
# The matrices hold the following data:
# - x holds the values (all rows are the same for 1st order), and
# - y holds the probabilities (one probability distribution per row/time sample).
def mean_hist_xy(x, y):
"""
Computes mean values for a set of distributions.
Both x and y are (M, N) matrices, the return value is a (M, ) vector.
"""
return np.divide(np.sum(x * y, axis=1), np.sum(y, axis=1))
def var_hist_xy(x, y, mu):
"""
Computes variances for a set of distributions.
This amounts to E[(X - E[X])**2].
Both x and y are (M, N) matrices, mu is a (M, ) vector, the return value is a (M, ) vector.
"""
# Replicate mu.
num_values = x.shape[1]
mu = np.transpose(np.tile(mu, (num_values, 1)))
# Compute the variances.
x_mu_2 = np.power(x - mu, 2)
return mean_hist_xy(x_mu_2, y)
def ttest1_hist_xy(x_a, y_a, x_b, y_b):
"""
Basic first-order t-test.
Everything needs to be a matrix.
"""
mu1 = mean_hist_xy(x_a, y_a)
mu2 = mean_hist_xy(x_b, y_b)
std1 = np.sqrt(var_hist_xy(x_a, y_a, mu1))
std2 = np.sqrt(var_hist_xy(x_b, y_b, mu2))
N1 = np.sum(y_a, axis=1)
N2 = np.sum(y_b, axis=1)
return ttest_ind_from_stats(mu1,
std1,
N1,
mu2,
std2,
N2,
equal_var=False,
alternative='two-sided')[0]
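# For reference, with equal_var=False ttest_ind_from_stats computes Welch's
# t-statistic:
#     t = (mu1 - mu2) / sqrt(std1**2 / N1 + std2**2 / N2)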
def ttest_hist_xy(x_a, y_a, x_b, y_b, num_orders):
"""
Welch's t-test for orders 1,..., num_orders.
For more details see: Reparaz et. al. "Fast Leakage Assessment", CHES 2017.
available at: https://eprint.iacr.org/2017/624.pdf
x_a and x_b are (M/num_orders, N) matrices holding the values, one value vector per row.
y_a and y_b are (M/num_orders, N) matrices holding the distributions, one distribution per row.
The return value is (num_orders, M/num_orders)
"""
num_values = x_a.shape[1]
num_samples = y_a.shape[0]
#############
# y_a / y_b #
#############
# y_a and y_b are the same for all orders and can simply be replicated along the first axis.
y_a_ord = np.tile(y_a, (num_orders, 1))
y_b_ord = np.tile(y_b, (num_orders, 1))
#############
# x_a / x_b #
#############
# x_a and x_b are different on a per-order basis. Start with an empty array.
x_a_ord = np.zeros((num_samples * num_orders, num_values))
x_b_ord = np.zeros((num_samples * num_orders, num_values))
# Compute shareable intermediate results.
if num_orders > 1:
mu_a = mean_hist_xy(x_a, y_a)
mu_b = mean_hist_xy(x_b, y_b)
if num_orders > 2:
var_a = var_hist_xy(x_a, y_a, mu_a)
var_b = var_hist_xy(x_b, y_b, mu_b)
sigma_a = np.transpose(np.tile(np.sqrt(var_a), (num_values, 1)))
sigma_b = np.transpose(np.tile(np.sqrt(var_b), (num_values, 1)))
# Fill in the values.
for i_order in range(num_orders):
if i_order == 0:
# First order takes the values as is.
x_a_ord[0:num_samples, :] = x_a
x_b_ord[0:num_samples, :] = x_b
else:
# Second order takes the variance.
tmp_a = x_a - np.transpose(np.tile(mu_a, (num_values, 1)))
tmp_b = x_b - np.transpose(np.tile(mu_b, (num_values, 1)))
if i_order > 1:
# Higher orders take the higher order moments, and also divide by sigma.
tmp_a = np.divide(tmp_a, sigma_a)
tmp_b = np.divide(tmp_b, sigma_b)
# Take the power and fill in the values.
tmp_a = np.power(tmp_a, i_order + 1)
tmp_b = np.power(tmp_b, i_order + 1)
x_a_ord[i_order * num_samples:(i_order + 1) * num_samples, :] = tmp_a
x_b_ord[i_order * num_samples:(i_order + 1) * num_samples, :] = tmp_b
# Compute Welch's t-test for all requested orders.
ttest = ttest1_hist_xy(x_a_ord, y_a_ord, x_b_ord, y_b_ord)
return np.reshape(ttest, (num_orders, num_samples))
def compute_statistics(num_orders, rnd_list, byte_list, histograms, x_axis):
""" Computing t-test statistics for a set of time samples.
"""
num_rnds = len(rnd_list)
num_bytes = len(byte_list)
num_samples = histograms.shape[3]
ttest_trace = np.zeros((num_orders, num_rnds, num_bytes, num_samples))
# Replicate the x_axis such that x has the same dimensions as fixed_set/random_set below.
x = np.tile(x_axis, (num_samples, 1))
# Compute statistics.
for i_rnd in range(num_rnds):
for i_byte in range(num_bytes):
# We do fixed vs. random.
fixed_set = histograms[i_rnd, i_byte, 0, :, :]
random_set = np.sum(histograms[i_rnd, i_byte, 1:, :, :], 0)
if not np.any(fixed_set != 0.0) or not np.any(random_set != 0.0):
# In case any of the sets is empty, the statistics can't be computed. This can
# happen if for example:
# - Few traces are used only.
# - The hamming distance is used as sensitive variable and the initial round is
# analyzed. Then the hamming distance can only be zero (fixed_set) or non-zero
# (random_set) if the corresponding key byte is zero or non-zero, respectively.
# Thus, either of the sets must be empty.
# We return NaN and handle it when checking all results.
ttest_trace[:, i_rnd, i_byte, :] = np.nan
continue
tmp = ttest_hist_xy(x, fixed_set, x, random_set, num_orders)
ttest_trace[:, i_rnd, i_byte, :] = tmp
return ttest_trace
def compute_histograms_general(trace_resolution, traces, leakage):
""" Building histograms for general fixed-vs-random TVLA.
For each time sample we make two histograms, one for the fixed and one for the random group.
Whether a trace belongs to the fixed or random group is indicated in the leakage input
variable. The value stored in histograms[v][w][x][y][z] shows how many traces have value z at
time y, given that trace is in the fixed (x = 1) or random (x = 0) group. The v and w indices
# are not used but we keep them for code compatibility with non-general AES TVLA.
"""
num_leakages = 2
num_rnds = 1
num_bytes = 1
num_samples = traces.shape[1]
histograms = np.zeros((num_rnds, num_bytes, num_leakages, num_samples, trace_resolution),
dtype=np.uint32)
for i_sample in range(num_samples):
histograms[0, 0, :, i_sample, :] = np.histogram2d(
leakage, traces[:, i_sample],
bins=[range(num_leakages + 1), range(trace_resolution + 1)])[0]
return histograms
def compute_histograms_aes(trace_resolution, rnd_list, byte_list, traces, leakage):
""" Building histograms for AES.
For each time sample we make two histograms, one for Hamming weight of the sensitive variable
= 0 (fixed set) and one for Hamming weight > 0 (random set). The value stored in
histograms[v][w][x][y][z] shows how many traces have value z at time y, given that
HW(state byte w in AES round v) = 0 (fixed set, x = 0) or > 0 (random set, x = 1).
"""
num_leakages = 2
num_rnds = len(rnd_list)
num_bytes = len(byte_list)
num_samples = traces.shape[1]
histograms = np.zeros((num_rnds, num_bytes, num_leakages, num_samples, trace_resolution),
dtype=np.uint32)
for i_rnd in range(num_rnds):
for i_byte in range(num_bytes):
for i_sample in range(num_samples):
histograms[i_rnd, i_byte, :, i_sample, :] = np.histogram2d(
leakage[rnd_list[i_rnd], byte_list[i_byte], :], traces[:, i_sample],
bins=[np.append(range(num_leakages), 9), range(trace_resolution + 1)])[0]
return histograms
def compute_leakage_aes(keys, plaintexts, leakage_model):
"""
Sensitive variable is always byte-sized.
Two leakage models are available:
HAMMING_WEIGHT - based on the hamming weight of the state register byte.
HAMMING_DISTANCE - based on the hamming distance between the current and previous state
for a specified byte.
"""
num_traces = len(keys)
leakage = np.zeros((11, 16, num_traces), dtype=np.uint8)
# Checks if all keys in the list are the same.
key_fixed = np.all(keys == keys[0])
subkey = np.zeros((11, 16))
if key_fixed:
for j in range(11):
subkey[j] = np.asarray(
aes_funcs.key_schedule_rounds(keys[0], 0, j))
subkey = subkey.astype(int)
for i in range(num_traces):
if not key_fixed:
for j in range(11):
subkey[j] = np.asarray(
aes_funcs.key_schedule_rounds(keys[i], 0, j))
subkey = subkey.astype(int)
# Init
state = plaintexts[i]
# Round 0
old_state = state
state = np.bitwise_xor(state, subkey[0])
for k in range(16):
if leakage_model == 'HAMMING_DISTANCE':
leakage[0][k][i] = bit_count(
np.bitwise_xor(state[k], old_state[k]))
else:
leakage[0][k][i] = bit_count(state[k])
# Round 1 - 10
for j in range(1, 11):
old_state = state
state = aes_funcs.subbytes(state)
state = aes_funcs.shiftrows(state)
if (j < 10):
state = aes_funcs.mixcolumns(state)
state = np.bitwise_xor(state, subkey[j])
for k in range(16):
if leakage_model == 'HAMMING_DISTANCE':
leakage[j][k][i] = bit_count(
np.bitwise_xor(state[k], old_state[k]))
else:
leakage[j][k][i] = bit_count(state[k])
return leakage
import os
import gin
import sys
import torch
import numpy as np
import tensorflow as tf
from utils import device
from paths import META_DATASET_ROOT, META_RECORDS_ROOT, PROJECT_ROOT
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Quiet the TensorFlow warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # Quiet the TensorFlow warnings
sys.path.append(os.path.abspath(META_DATASET_ROOT))
from meta_dataset.data import dataset_spec as dataset_spec_lib
from meta_dataset.data import learning_spec
from meta_dataset.data import pipeline
from meta_dataset.data import config
ALL_METADATASET_NAMES = "ilsvrc_2012 omniglot aircraft cu_birds dtd quickdraw fungi vgg_flower traffic_sign mscoco mnist cifar10 cifar100".split(' ')
TRAIN_METADATASET_NAMES = ALL_METADATASET_NAMES[:8]
TEST_METADATASET_NAMES = ALL_METADATASET_NAMES[-5:]
SPLIT_NAME_TO_SPLIT = {'train': learning_spec.Split.TRAIN,
'val': learning_spec.Split.VALID,
'test': learning_spec.Split.TEST}
class MetaDatasetReader(object):
def __init__(self, mode, train_set, validation_set, test_set):
assert (train_set is not None or validation_set is not None or test_set is not None)
self.data_path = META_RECORDS_ROOT
self.train_dataset_next_task = None
self.validation_set_dict = {}
self.test_set_dict = {}
self.specs_dict = {}
gin.parse_config_file(f"{PROJECT_ROOT}/data/meta_dataset_config.gin")
def _get_dataset_spec(self, items):
if isinstance(items, list):
dataset_specs = []
for dataset_name in items:
dataset_records_path = os.path.join(self.data_path, dataset_name)
dataset_spec = dataset_spec_lib.load_dataset_spec(dataset_records_path)
dataset_specs.append(dataset_spec)
return dataset_specs
else:
dataset_name = items
dataset_records_path = os.path.join(self.data_path, dataset_name)
dataset_spec = dataset_spec_lib.load_dataset_spec(dataset_records_path)
return dataset_spec
def _to_torch(self, sample):
for key, val in sample.items():
if isinstance(val, str):
continue
val = torch.from_numpy(val)
if 'image' in key:
val = val.permute(0, 3, 2, 1)
else:
val = val.long()
sample[key] = val.to(device)
return sample
def num_classes(self, split_name):
split = SPLIT_NAME_TO_SPLIT[split_name]
all_split_specs = self.specs_dict[SPLIT_NAME_TO_SPLIT['train']]
if not isinstance(all_split_specs, list):
all_split_specs = [all_split_specs]
total_n_classes = 0
for specs in all_split_specs:
total_n_classes += len(specs.get_classes(split))
return total_n_classes
def build_class_to_identity(self):
split = SPLIT_NAME_TO_SPLIT['train']
all_split_specs = self.specs_dict[SPLIT_NAME_TO_SPLIT['train']]
if not isinstance(all_split_specs, list):
all_split_specs = [all_split_specs]
self.cls_to_identity = dict()
self.dataset_id_to_dataset_name = dict()
self.dataset_to_n_cats = dict()
offset = 0
for dataset_id, specs in enumerate(all_split_specs):
dataset_name = specs.name
self.dataset_id_to_dataset_name[dataset_id] = dataset_name
n_cats = len(specs.get_classes(split))
self.dataset_to_n_cats[dataset_name] = n_cats
for cat in range(n_cats):
self.cls_to_identity[offset + cat] = (cat, dataset_id)
offset += n_cats
self.dataset_name_to_dataset_id = {v: k for k, v in
self.dataset_id_to_dataset_name.items()}
class MetaDatasetEpisodeReader(MetaDatasetReader):
"""
Class that wraps the Meta-Dataset episode readers.
"""
def __init__(self, mode, train_set=None, validation_set=None, test_set=None):
super(MetaDatasetEpisodeReader, self).__init__(mode, train_set, validation_set, test_set)
if mode == 'train':
train_episode_description = config.EpisodeDescriptionConfig(None, None, None)
self.train_dataset_next_task = self._init_multi_source_dataset(
train_set, SPLIT_NAME_TO_SPLIT['train'], train_episode_description)
if mode == 'val':
test_episode_description = config.EpisodeDescriptionConfig(None, None, None)
for item in validation_set:
next_task = self._init_single_source_dataset(
item, SPLIT_NAME_TO_SPLIT['val'], test_episode_description)
self.validation_set_dict[item] = next_task
if mode == 'test':
test_episode_description = config.EpisodeDescriptionConfig(None, None, None)
for item in test_set:
next_task = self._init_single_source_dataset(
item, SPLIT_NAME_TO_SPLIT['test'], test_episode_description)
self.test_set_dict[item] = next_task
def _init_multi_source_dataset(self, items, split, episode_description):
dataset_specs = self._get_dataset_spec(items)
self.specs_dict[split] = dataset_specs
use_bilevel_ontology_list = [False] * len(items)
use_dag_ontology_list = [False] * len(items)
# Enable ontology aware sampling for Omniglot and ImageNet.
if 'omniglot' in items:
use_bilevel_ontology_list[items.index('omniglot')] = True
if 'ilsvrc_2012' in items:
use_dag_ontology_list[items.index('ilsvrc_2012')] = True
multi_source_pipeline = pipeline.make_multisource_episode_pipeline(
dataset_spec_list=dataset_specs,
use_dag_ontology_list=use_dag_ontology_list,
use_bilevel_ontology_list=use_bilevel_ontology_list,
split=split,
episode_descr_config = episode_description,
image_size=84,
shuffle_buffer_size=1000)
iterator = multi_source_pipeline.make_one_shot_iterator()
return iterator.get_next()
def _init_single_source_dataset(self, dataset_name, split, episode_description):
dataset_spec = self._get_dataset_spec(dataset_name)
self.specs_dict[split] = dataset_spec
# Enable ontology aware sampling for Omniglot and ImageNet.
use_bilevel_ontology = False
if 'omniglot' in dataset_name:
use_bilevel_ontology = True
use_dag_ontology = False
if 'ilsvrc_2012' in dataset_name:
use_dag_ontology = True
single_source_pipeline = pipeline.make_one_source_episode_pipeline(
dataset_spec=dataset_spec,
use_dag_ontology=use_dag_ontology,
use_bilevel_ontology=use_bilevel_ontology,
split=split,
episode_descr_config=episode_description,
image_size=84,
shuffle_buffer_size=1000)
iterator = single_source_pipeline.make_one_shot_iterator()
return iterator.get_next()
def _get_task(self, next_task, session):
episode = session.run(next_task)[0]
task_dict = {
'context_images': episode[0],
'context_labels': episode[1],
'target_images': episode[3],
'target_labels': episode[4]
}
return self._to_torch(task_dict)
def get_train_task(self, session):
return self._get_task(self.train_dataset_next_task, session)
def get_validation_task(self, session, item=None):
item = item if item else list(self.validation_set_dict.keys())[0]
return self._get_task(self.validation_set_dict[item], session)
def get_test_task(self, session, item=None):
item = item if item else list(self.test_set_dict.keys())[0]
return self._get_task(self.test_set_dict[item], session)
class MetaDatasetBatchReader(MetaDatasetReader):
"""
Class that wraps the Meta-Dataset batch readers.
"""
def __init__(self, mode, train_set, validation_set, test_set, batch_size):
super(MetaDatasetBatchReader, self).__init__(mode, train_set, validation_set, test_set)
self.batch_size = batch_size
if mode == 'train':
self.train_dataset_next_task = self._init_multi_source_dataset(
train_set, SPLIT_NAME_TO_SPLIT['train'])
if mode == 'val':
for item in validation_set:
next_task = self.validation_dataset = self._init_single_source_dataset(
item, SPLIT_NAME_TO_SPLIT['val'])
self.validation_set_dict[item] = next_task
if mode == 'test':
for item in test_set:
next_task = self._init_single_source_dataset(
item, SPLIT_NAME_TO_SPLIT['test'])
self.test_set_dict[item] = next_task
self.build_class_to_identity()
def _init_multi_source_dataset(self, items, split):
dataset_specs = self._get_dataset_spec(items)
self.specs_dict[split] = dataset_specs
multi_source_pipeline = pipeline.make_multisource_batch_pipeline(
dataset_spec_list=dataset_specs, batch_size=self.batch_size,
split=split, image_size=84, add_dataset_offset=True, shuffle_buffer_size=1000)
iterator = multi_source_pipeline.make_one_shot_iterator()
return iterator.get_next()
def _init_single_source_dataset(self, dataset_name, split):
dataset_specs = self._get_dataset_spec(dataset_name)
self.specs_dict[split] = dataset_specs
multi_source_pipeline = pipeline.make_one_source_batch_pipeline(
dataset_spec=dataset_specs, batch_size=self.batch_size,
split=split, image_size=84)
iterator = multi_source_pipeline.make_one_shot_iterator()
return iterator.get_next()
def _get_batch(self, next_task, session):
episode = session.run(next_task)[0]
images, labels = episode[0], episode[1]
local_classes, dataset_ids = [], []
for label in labels:
local_class, dataset_id = self.cls_to_identity[label]
local_classes.append(local_class)
dataset_ids.append(dataset_id)
task_dict = {
'images': images,
'labels': labels,
'local_classes': np.array(local_classes),
'dataset_ids': np.array(dataset_ids)
}
return self._to_torch(task_dict)
# -*- coding: utf-8 -*-
"""
This module is used for calculations of the orthonormalization matrix for
the boundary wavelets.
The boundary_wavelets.py package is licensed under the MIT "Expat" license.
Copyright (c) 2019: <NAME> and <NAME>.
"""
# =============================================================================
# Imports
# =============================================================================
import numpy as np
from scipy.integrate import simps
import boundwave.boundary_wavelets as BW
# =============================================================================
# Functions
# =============================================================================
def integral(J, k, l, wavelet_coef, phi):
r'''
This function calculates the integral (16) numerically.
INPUT:
J : int
The scale.
k : int
The translation for the first function.
l : int
The translation for the second function.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to :math:`\sqrt{2}`.
For Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo)`.
phi : numpy.float64
The phi function, can be made with
`pywt.Wavelet(wavelet).wavefun(level=15)`.
OUTPUT:
out : int
The value of the integral.
'''
a = int(len(wavelet_coef) / 2)
OneStep = len(phi) // (2 * a - 1)
phiNorm = np.linalg.norm(BW.downsample(phi, 0, OneStep, J))
phi1 = BW.downsample(phi, k, OneStep, J) / phiNorm
phi2 = BW.downsample(phi, l, OneStep, J) / phiNorm
phiProd = phi1 * phi2
Integ = simps(phiProd)
return Integ
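# Note: the inner product <phi_{J,k}, phi_{J,l}> above is approximated with
# Simpson's rule (scipy.integrate.simps) on the sampled, normalised scaling
# functions.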
def m_alpha_beta(alpha, beta, J, wavelet_coef, inte_matrix, Side):
r'''
This function calculates an entry in the matrix :math:`M` (15).
INPUT:
alpha : int
alpha
beta : int
beta
J : int
The scale.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to :math:`\sqrt{2}`. For
Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo`).
inte_matrix : numpy.float64
A matrix with the values for the integrals calculated with
the function :py:func:`integral` for k and l in the
interval [-2*a+2,0] or [2**J-2*a+1,2**J-1].
Side : str
`'L'` for left interval boundary and `'R'` for right
interval boundary.
OUTPUT:
M : numpy.float64
Entry (alpha,beta) of the martix M
'''
a = int(len(wavelet_coef) / 2)
Moment = BW.moments(wavelet_coef, a - 1)
M = 0
if Side == 'L':
interval = range(-2 * a + 2, 1)
i = 0
for k in interval:
j = 0
for m in interval:
M += (BW.inner_product_phi_x(alpha, 0, k, Moment) *
BW.inner_product_phi_x(beta, 0, m, Moment) *
inte_matrix[i, j])
j += 1
i += 1
elif Side == 'R':
interval = range(2**J - 2 * a + 1, 2**J)
i = 0
for k in interval:
j = 0
for m in interval:
M += (BW.inner_product_phi_x(alpha, 0, k, Moment) *
BW.inner_product_phi_x(beta, 0, m, Moment) *
inte_matrix[i, j] * 2**(-J * (alpha + beta)))
j += 1
i += 1
else:
print('You must choose a side')
return M
def ortho_matrix(J, wavelet_coef, phi):
r'''
This function finds the orthogonality matrix :math:`A`. It first
uses the functions :py:func:`m_alpha_beta` and :py:func:`integral`
to build the matrix M, then computes a Cholesky decomposition,
which is inverted.
INPUT:
J : int
The scale.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to
:math:`\sqrt{2}`. For Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo)`.
phi : numpy.float64
The phi function, can be made with
`pywt.Wavelet(wavelet).wavefun(level=15)`.
OUTPUT:
AL : numpy.float64
Left orthonormalisation matrix; to be used in
:py:func:`boundwave.boundary_wavelets.boundary_wavelets` or
:py:func:`boundwave.fourier_boundary_wavelets.fourier_boundary_wavelets`.
AR : numpy.float64
Right orthonormalisation matrix; to be used in
:py:func:`boundwave.boundary_wavelets.boundary_wavelets` or
:py:func:`boundwave.fourier_boundary_wavelets.fourier_boundary_wavelets`.
'''
a = int(len(wavelet_coef) / 2)
ML = np.zeros((a, a))
MR = np.zeros((a, a))
InteL = np.zeros((2 * a - 1, 2 * a - 1))
k = 0
for i in range(-2 * a + 2, 1):
m = 0
for j in range(-2 * a + 2, i + 1):
InteL[k, m] = integral(J, i, j, wavelet_coef, phi)
InteL[m, k] = InteL[k, m]
m += 1
k += 1
InteR = np.zeros((2 * a - 1, 2 * a - 1))
k = 0
for i in range(2**J - 2 * a + 1, 2**J):
m = 0
for j in range(2**J - 2 * a + 1, i + 1):
InteR[k, m] = integral(J, i, j, wavelet_coef, phi)
InteR[m, k] = InteR[k, m]
m += 1
k += 1
for i in range(a):
for j in range(i + 1):
ML[i, j] = m_alpha_beta(i, j, J, wavelet_coef, InteL, 'L')
ML[j, i] = ML[i, j]
for i in range(a):
for j in range(i + 1):
MR[i, j] = m_alpha_beta(i, j, J, wavelet_coef, InteR, 'R')
MR[j, i] = MR[i, j]
h = 2**(J * np.arange(a))
CL = np.linalg.cholesky(ML)
AL = 2**(J / 2) * np.dot(np.linalg.inv(CL), np.diag(h))
CR = np.linalg.cholesky(MR)
U, S, V = np.linalg.svd(CR)
import sys
import numpy as np
import matplotlib.pyplot as plt
from numpy import matlib
from pf_utils import load_config_vars, P_nk, P_dk, P_wk
# ############ Parameters ############
display_topics = 15
display_words = 12
display_groups = 2
# ############ Main code ############
# Load config file
config_fname = 'poisson.config'
if len(sys.argv) > 1:
config_fname = sys.argv[1]
print('Loading ' + config_fname)
variables = load_config_vars(config_fname)
KB = int(variables['NETWORK_TOPICS'])
KY = int(variables['CORPUS_TOPICS'])
model = variables['RUN_MODEL']
print('Displaying results for ' + model + '\n')
# Show topics
if model == 'cgppf' or model == 'jgppf':
print('Loading corpus expected values')
# Get corpus dimensions
with open(variables['CORPUS_TRAIN'], 'r') as fin:
line = fin.readline()
line_split = line.rstrip('\r\n').split('\t')
D = int(line_split[0])
V = int(line_split[1])
rkY = np.zeros(KY)
betawk = np.zeros((V,KY))
thetadk = np.zeros((D,KY))
# Load rkY
fname = variables['OUT_DIR'] + '/' + model.upper() + '/expectedValues/rkY.txt'
print(fname)
with open(fname, 'r') as fin:
for k,line in enumerate(fin):
rkY[k] = float(line.rstrip('\r\n'))
# Load betawk
fname = variables['OUT_DIR'] + '/' + model.upper() + '/expectedValues/betawk.txt'
print(fname)
with open(fname, 'r') as fin:
for k,line in enumerate(fin):
vals = line.rstrip('\r\n').split('\t')
for w in range(V):
betawk[w,k] = float(vals[w])
# Load thetadk
fname = variables['OUT_DIR'] + '/' + model.upper() + '/expectedValues/thetadk.txt'
print(fname)
with open(fname, 'r') as fin:
for k,line in enumerate(fin):
vals = line.rstrip('\r\n').split('\t')
for d in range(D):
thetadk[d,k] = float(vals[d])
# Load the dictionary
corpus_dictionary = dict()
if variables.get('CORPUS_DICTIONARY', ''):
with open(variables['CORPUS_DICTIONARY'], 'r') as fin:
for line in fin:
line_split = line.rstrip('\r\n').split('\t')
corpus_dictionary[int(line_split[0])] = line_split[1]
else:
for w in range(V):
corpus_dictionary[w] = str(w)
# Show the top words per topic
term_topic_joint = P_wk(rkY, thetadk, betawk)
topic_dist = np.sum(term_topic_joint, axis=0)
top_topics = np.argsort(topic_dist)
print('\nTop words per corpus topic:')
for k in range(display_topics):
topic = top_topics[-k-1]
top_words = np.argsort(term_topic_joint[:,topic])
strout = 'Topic ' + str(topic) + ' (' + str(topic_dist[topic]) + '):'
for w in range(display_words):
strout += ' ' + corpus_dictionary[top_words[-w-1]]
print(strout)
print(' ')
# Show network factors
if model == 'ngppf' or model == 'jgppf':
print('Loading network expected values')
# Get network dimensions
with open(variables['NETWORK_TRAIN'], 'r') as fin:
line = fin.readline()
line_split = line.rstrip('\r\n').split('\t')
N = int(line_split[0])
rkB = np.zeros(KB)
phink = np.zeros((N,KB))
# Load rkB
fname = variables['OUT_DIR'] + '/' + model.upper() + '/expectedValues/rkB.txt'
print(fname)
with open(fname, 'r') as fin:
for k,line in enumerate(fin):
rkB[k] = float(line.rstrip('\r\n'))
# Load phink
fname = variables['OUT_DIR'] + '/' + model.upper() + '/expectedValues/phink.txt'
print(fname)
with open(fname, 'r') as fin:
for k,line in enumerate(fin):
vals = line.rstrip('\r\n').split('\t')
for n in range(N):
phink[n,k] = float(vals[n])
# Load the dictionary
network_dictionary = dict()
if variables.get('NETWORK_DICTIONARY', ''):
with open(variables['NETWORK_DICTIONARY'], 'r') as fin:
for line in fin:
line_split = line.rstrip('\r\n').split('\t')
network_dictionary[int(line_split[0])] = line_split[1]
else:
for n in range(N):
network_dictionary[n] = str(n)
# Display the top authors per group
term_topic_joint = P_nk(rkB, phink)
topic_dist = np.sum(term_topic_joint, axis=0)
top_topics = np.argsort(topic_dist)
print('\nTop authors per network group:')
for k in range(display_groups):
topic = top_topics[-k-1]
top_words = np.argsort(term_topic_joint[:,topic])
strout = 'Group ' + str(topic) + ' (' + str(topic_dist[topic]) + '):'
for w in range(display_words):
strout += ' ' + network_dictionary[top_words[-w-1]]
print(strout)
print(' ')
# Display the matrices
plt.figure()
plt.subplot(1,2,1)
plt.plot(rkB)
plt.xlabel('Factors (kB)')
plt.title('r_kB')
plt.subplot(1,2,2)
plt.imshow(phink, cmap='jet') # Groups (network)
plt.xlabel('Factors (kB)')
plt.ylabel('Network vertices (n)')
plt.title('phi_nk')
plt.colorbar(shrink=0.7)
print(' ')
# Show joint results
if model == 'jgppf':
print('Loading joint modeling expected values')
psiwk = np.zeros((V,KB))
Znd = np.zeros((N,D))
# Load psiwk
fname = variables['OUT_DIR'] + '/' + model.upper() + '/expectedValues/psiwk.txt'
print(fname)
with open(fname, 'r') as fin:
for k,line in enumerate(fin):
vals = line.rstrip('\r\n').split('\t')
for w in range(V):
psiwk[w,k] = float(vals[w])
# Load Z
fname = variables['AUTHORS_TRAIN']
print(fname)
with open(fname, 'r') as fin:
for line in fin:
vals = line.rstrip('\r\n').split('\t')
author_id = int(vals[0])
doc_id = int(vals[1])
Znd[author_id, doc_id] = 1
# Display the top words per group
Zphidk = np.array(np.matrix(Znd.T) * np.matrix(phink))
term_topic_joint = P_wk(rkB, Zphidk, psiwk)
topic_dist = np.sum(term_topic_joint, axis=0)
top_topics = np.argsort(topic_dist)
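# (Hedged completion, mirroring the corpus/network display loops earlier in
# this script; everything below is assumed, not recovered from the original.)
print('\nTop words per network group:')
for k in range(display_groups):
topic = top_topics[-k-1]
top_words = np.argsort(term_topic_joint[:,topic])
strout = 'Group ' + str(topic) + ' (' + str(topic_dist[topic]) + '):'
for w in range(display_words):
strout += ' ' + corpus_dictionary[top_words[-w-1]]
print(strout)
print(' ')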
'''
Script for plotting thin client runtimes
'''
from collections import OrderedDict as od
from matplotlib import pyplot as plt
import numpy as np
import pickle
import sys
colors_full = ['#1abc9c','#16a085','#27ae60','#2ecc71','#f1c40f']
colors_group = ['#c0392b' ,'#e74c3c', '#ec7063','#8e44ad']
lines = ['-','--','-.',':']
all_colors = ['blue','green','magenta','cyan','lime','red','orange','black']
n_parties = 100
n_computes = [0,2,4,8]
plot_fold_inc = 1
prefix = '../test_results/runtime_tests/bounded_swor_thin_536_test1/'
all_tests = [
'no-crypto',
'compute2',
'compute4',
'compute8',
]
lr = '0.0009'
n_repeats = 5
res = {}
res['crypto_times'] = od()
res['runtimes'] = od()
res['runtimes']['total'] = od()
res['runtimes']['sum'] = od()
res['runtimes']['min*iters'] = od()
res['runtimes']['median*iters'] = od()
for i_test,test in enumerate(all_tests):
res['crypto_times'][test] = np.zeros(n_repeats)-1
res['runtimes']['total'][test] = np.zeros(n_repeats)
res['runtimes']['sum'][test] = np.zeros(n_repeats)
res['runtimes']['min*iters'][test] = np.zeros(n_repeats)
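# (Hedged completion: the remaining result buffer is presumably initialised
# like the ones above.)
res['runtimes']['median*iters'][test] = np.zeros(n_repeats)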
from osgeo import gdal
import os
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
country = 'denmark'
if country == 'france':
geotif_2015 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\data\france\2015.tif')
geotif_2020 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2020.tif')
geotif_2030 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2030.tif')
geotif_2040 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2040.tif')
geotif_2050 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2050.tif')
geotif_2060 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2060.tif')
geotif_2070 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2070.tif')
geotif_2080 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2080.tif')
geotif_2090 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2090.tif')
geotif_2100 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2100.tif')
if country == 'denmark':
# geotif_2015 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\data\denmark\2015.tif')
# geotif_2020_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2020.tif')
# geotif_2030_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2030.tif')
# geotif_2040_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2040.tif')
# geotif_2050_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2050.tif')
# geotif_2060_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2060.tif')
# geotif_2070_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2070.tif')
# geotif_2080_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2080.tif')
# geotif_2090_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2090.tif')
# geotif_2100_fr = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_fr\bbox_pred_2100.tif')
#
# geotif_2020_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2020.tif')
# geotif_2030_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2030.tif')
# geotif_2040_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2040.tif')
# geotif_2050_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2050.tif')
# geotif_2060_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2060.tif')
# geotif_2070_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2070.tif')
# geotif_2080_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2080.tif')
# geotif_2090_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2090.tif')
# geotif_2100_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\france_model\outputs\comparison_dk\bbox_pred_2100.tif')
geotif_2015 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\data\denmark\2015.tif')
geotif_2020_lake = gdal.Open(
r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\road\outputs\bbox\bbox_pred_2020.tif')
geotif_2050_lake = gdal.Open(
r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\road\outputs\bbox\bbox_pred_2050.tif')
geotif_2100_lake = gdal.Open(
r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\road\outputs\bbox\bbox_pred_2100.tif')
geotif_2020_fin = gdal.Open(
r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\outputs\bbox\bbox_pred_2020.tif')
geotif_2050_fin = gdal.Open(
r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\outputs\bbox\bbox_pred_2050.tif')
geotif_2100_fin = gdal.Open(
r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\outputs\bbox\bbox_pred_2100.tif')
# geotif_2030 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2030.tif')
# geotif_2040 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2040.tif')
geotif_2050 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2050.tif')
# geotif_2060 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2060.tif')
# geotif_2070 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2070.tif')
# geotif_2080 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2080.tif')
# geotif_2090 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\france\final\pred_2090.tif')
geotif_2100 = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2100.tif')
# np_2015 = np.array(geotif_2015.GetRasterBand(1).ReadAsArray()).flatten()
np_2020_lake = np.array(geotif_2020_lake.GetRasterBand(1).ReadAsArray()).flatten()
# np_2030 = np.array(geotif_2030.GetRasterBand(1).ReadAsArray()).flatten()
# np_2040 = np.array(geotif_2040.GetRasterBand(1).ReadAsArray()).flatten()
np_2050_lake = np.array(geotif_2050_lake.GetRasterBand(1).ReadAsArray()).flatten()
# np_2060 = np.array(geotif_2060.GetRasterBand(1).ReadAsArray()).flatten()
# np_2070 = np.array(geotif_2070.GetRasterBand(1).ReadAsArray()).flatten()
# np_2080 = np.array(geotif_2080.GetRasterBand(1).ReadAsArray()).flatten()
# np_2090 = np.array(geotif_2090.GetRasterBand(1).ReadAsArray()).flatten()
np_2100_lake = np.array(geotif_2100_lake.GetRasterBand(1).ReadAsArray()).flatten()
np_2020_fin = np.array(geotif_2020_fin.GetRasterBand(1).ReadAsArray()).flatten()
np_2050_fin = np.array(geotif_2050_fin.GetRasterBand(1).ReadAsArray()).flatten()
np_2100_fin = np.array(geotif_2100_fin.GetRasterBand(1).ReadAsArray()).flatten()
# geotif_2020_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2020.tif')
# geotif_2030_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2030.tif')
# geotif_2040_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2040.tif')
# geotif_2050_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2050.tif')
# geotif_2060_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2060.tif')
# geotif_2070_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2070.tif')
# geotif_2080_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2080.tif')
# geotif_2090_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2090.tif')
# geotif_2100_dk = gdal.Open(r'C:\Users\Niels\Documents\GitHub\PopNet\experiments\denmark\final\pred_2100.tif')
# np_2015_fr = np.array(geotif_2015_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2020_fr = np.array(geotif_2020_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2030_fr = np.array(geotif_2030_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2040_fr = np.array(geotif_2040_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2050_fr = np.array(geotif_2050_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2060_fr = np.array(geotif_2060_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2070_fr = np.array(geotif_2070_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2080_fr = np.array(geotif_2080_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2090_fr = np.array(geotif_2090_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2100_fr = np.array(geotif_2100_fr.GetRasterBand(1).ReadAsArray()).flatten()
# np_2020_dk = np.array(geotif_2020_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2030_dk = np.array(geotif_2030_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2040_dk = np.array(geotif_2040_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2050_dk = np.array(geotif_2050_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2060_dk = np.array(geotif_2060_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2070_dk = np.array(geotif_2070_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2080_dk = np.array(geotif_2080_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2090_dk = np.array(geotif_2090_dk.GetRasterBand(1).ReadAsArray()).flatten()
# np_2100_dk = np.array(geotif_2100_dk.GetRasterBand(1).ReadAsArray()).flatten()
# print(np.max(np_2015))
# print(np.max(np_2020))
# print(np.max(np_2030))
# print(np.max(np_2040))
# print(np.max(np_2050))
# print(np.max(np_2060))
# print(np.max(np_2070))
# print(np.max(np_2080))
# print(np.max(np_2090))
# print(np.max(np_2100))
# population = np.concatenate((np_2020_fr, np_2030_fr))
# year = np.concatenate((np.full(np_2020_fr.shape, '2020'), np.full(np_2030_fr.shape, '2030')))
# country = np.concatenate((np.full(np_2020_fr.shape, 'France'), np.full(np_2030_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2040_fr))
# year = np.concatenate((year, np.full(np_2040_fr.shape, '2040')))
# country = np.concatenate((country, np.full(np_2040_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2050_fr))
# year = np.concatenate((year, np.full(np_2050_fr.shape, '2050')))
# country = np.concatenate((country, np.full(np_2050_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2060_fr))
# year = np.concatenate((year, np.full(np_2060_fr.shape, '2060')))
# country = np.concatenate((country, np.full(np_2060_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2070_fr))
# year = np.concatenate((year, np.full(np_2070_fr.shape, '2070')))
# country = np.concatenate((country, np.full(np_2070_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2080_fr))
# year = np.concatenate((year, np.full(np_2080_fr.shape, '2080')))
# country = np.concatenate((country, np.full(np_2080_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2090_fr))
# year = np.concatenate((year, np.full(np_2090_fr.shape, '2090')))
# country = np.concatenate((country, np.full(np_2090_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2100_fr))
# year = np.concatenate((year, np.full(np_2100_fr.shape, '2100')))
# country = np.concatenate((country, np.full(np_2100_fr.shape, 'France')))
#
# population = np.concatenate((population, np_2020_dk))
# year = np.concatenate((year, np.full(np_2020_dk.shape, '2020')))
# country = np.concatenate((country, np.full(np_2020_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2030_dk))
# year = np.concatenate((year, np.full(np_2030_dk.shape, '2030')))
# country = np.concatenate((country, np.full(np_2030_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2040_dk))
# year = np.concatenate((year, np.full(np_2040_dk.shape, '2040')))
# country = np.concatenate((country, np.full(np_2040_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2050_dk))
# year = np.concatenate((year, np.full(np_2050_dk.shape, '2050')))
# country = np.concatenate((country, np.full(np_2050_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2060_dk))
# year = np.concatenate((year, np.full(np_2060_dk.shape, '2060')))
# country = np.concatenate((country, np.full(np_2060_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2070_dk))
# year = np.concatenate((year, np.full(np_2070_dk.shape, '2070')))
# country = np.concatenate((country, np.full(np_2070_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2080_dk))
# year = np.concatenate((year, np.full(np_2080_dk.shape, '2080')))
# country = np.concatenate((country, np.full(np_2080_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2090_dk))
# year = np.concatenate((year, np.full(np_2090_dk.shape, '2090')))
# country = np.concatenate((country, np.full(np_2090_dk.shape, 'Denmark')))
#
# population = np.concatenate((population, np_2100_dk))
# year = np.concatenate((year, np.full(np_2100_dk.shape, '2100')))
# country = np.concatenate((country, np.full(np_2100_dk.shape, 'Denmark')))
#
# print(population.shape)
# print(year.shape)
population = np.concatenate((np_2020_lake, np_2050_lake))
year = np.concatenate((np.full(np_2020_lake.shape, '2020'), np.full(np_2050_lake.shape, '2050')))
scenario = np.concatenate((np.full(np_2020_lake.shape, 'With road'), np.full(np_2050_lake.shape, 'With road')))
# population = np.concatenate((population, np_2040))
# year = np.concatenate((year, np.full(np_2040.shape, '2040')))
# population = np.concatenate((population, np_2050))
# year = np.concatenate((year, np.full(np_2050.shape, '2050')))
# population = np.concatenate((population, np_2060))
# year = np.concatenate((year, np.full(np_2060.shape, '2060')))
# population = np.concatenate((population, np_2070))
# year = np.concatenate((year, np.full(np_2070.shape, '2070')))
# population = np.concatenate((population, np_2080))
# year = np.concatenate((year, np.full(np_2080.shape, '2080')))
# population = np.concatenate((population, np_2090))
# year = np.concatenate((year, np.full(np_2090.shape, '2090')))
population = np.concatenate((population, np_2100_lake))
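# (Hedged completion mirroring the 2020/2050 concatenations above; assumed,
# not recovered from the original.)
year = np.concatenate((year, np.full(np_2100_lake.shape, '2100')))
scenario = np.concatenate((scenario, np.full(np_2100_lake.shape, 'With road')))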
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Parts are based on https://github.com/multimodallearning/pytorch-mask-rcnn
published under MIT license.
"""
import warnings
warnings.filterwarnings('ignore', '.*From scipy 0.13.0, the output shape of zoom()*')
import numpy as np
import scipy.misc
import scipy.ndimage
import scipy.interpolate
from scipy.ndimage.measurements import label as lb
import torch
import tqdm
from custom_extensions.nms import nms
from custom_extensions.roi_align import roi_align
############################################################
# Segmentation Processing
############################################################
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(int(ax))
return input
def get_one_hot_encoding(y, n_classes):
"""
transform a numpy label array to a one-hot array of the same shape.
:param y: array of shape (b, 1, y, x, (z)).
:param n_classes: int, number of classes to unfold in one-hot encoding.
:return y_ohe: array of shape (b, n_classes, y, x, (z))
"""
dim = len(y.shape) - 2
if dim == 2:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3])).astype('int32')
elif dim == 3:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3], y.shape[4])).astype('int32')
else:
raise Exception("invalid dimensions {} encountered".format(y.shape))
for cl in np.arange(n_classes):
y_ohe[:, cl][y[:, 0] == cl] = 1
return y_ohe
def dice_per_batch_inst_and_class(pred, y, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch instance and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
:param y: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes]
:param n_classes: int
:return: dice scores of shape (b, c)
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
y = get_one_hot_encoding(y, n_classes)
axes = tuple(range(2, len(pred.shape)))
intersect = np.sum(pred*y, axis=axes)
denominator = np.sum(pred, axis=axes)
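# (Hedged completion, not in the original snippet: the usual soft-Dice also
# sums the ground truth into the denominator before forming the score.)
denominator = denominator + np.sum(y, axis=axes)
dice_scores = 2 * intersect / (denominator + smooth)
return dice_scores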
import json
from typing import Any
import numpy as np
from datetime import date, datetime
from db.db import DataBase, CarType
from pathlib import Path
from utils import dmy_from_date
from functools import lru_cache
class JSONDataBase(DataBase):
def __init__(self, path_to_db: str):
self.__path_to_db = path_to_db
def get_file_path(self, collect_day: date, departure_day: date) -> Path:
return (Path.cwd() /
self.__path_to_db /
dmy_from_date(collect_day) /
(dmy_from_date(departure_day) + '.json'))
@lru_cache
def open_json_file(self, collect_day: date, departure_day: date):
'''Returns mapping from "from_code" to
mapping from "where_code" to trains
'''
file = self.get_file_path(collect_day, departure_day)
if not file.is_file():
raise FileNotFoundError(f"File {file} does not exist")
with open(file, 'r') as f:
root = json.load(f)
data_mapped: dict[str | int, Any] = {}
for train_pair in root:
for trains in train_pair:
train_list = [train for train in trains['list'] if datetime.strptime(train['date0'], '%d.%m.%Y').date() == departure_day]
for train in trains['list']:
# Create set of car types:
cars = train['cars'] + train.get('seatCars', [])
car_type_list = {car['type'] for car in cars}
train.update({'car_type_list': car_type_list})
# Create map: fromCode -> whereCode
from_code = int(trains['fromCode'])
if from_code not in data_mapped:
data_mapped[from_code] = {}
# Create map: whereCode -> train
data_mapped[from_code].update({
int(trains['whereCode']): train_list
})
return data_mapped
def get_trip_cost(self,
collect_day: date,
departure_day: date,
from_code: int,
to_code: int,
train_number: str,
car_type: CarType):
'''
Returns: min cost, max cost, avg cost
Returns None if no such trains found
'''
data = self.open_json_file(collect_day, departure_day)
if from_code not in data or to_code not in data[from_code]:
return None
train_list = data[from_code][to_code]
available_costs: np.ndarray = np.array([], dtype=int)
trains = (t for t in train_list if t['number'] == train_number)
for train in trains:
cars = train['cars'] + train.get('seatCars', [])  # new list, so the lru_cache'd train dict is not mutated
for car in cars:
if car['type'] == car_type.value:
available_costs = np.append(available_costs,
int(car['tariff']))
if available_costs.size == 0:
return None
return (np.min(available_costs),
np.max(available_costs),
np.average(available_costs))
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 14:35:26 2020
@author: cheritie
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as sp
from AO_modules.calibration.InteractionMatrix import interactionMatrix
from AO_modules.MisRegistration import MisRegistration
from AO_modules.mis_registration_identification_algorithm.computeMetaSensitivyMatrix import computeMetaSensitivityMatrix
from AO_modules.mis_registration_identification_algorithm.applyMisRegistration import applyMisRegistration, apply_shift_wfs
from AO_modules.tools.interpolateGeometricalTransformation import rotateImageMatrix,rotation,translationImageMatrix,translation,anamorphosis,anamorphosisImageMatrix
import skimage.transform as sk
"""
def estimateMisRegistration(nameFolder, nameSystem, tel, atm, ngs, dm_0, wfs, basis, calib_in, misRegistrationZeroPoint, epsilonMisRegistration, param, precision = 3, gainEstimation = 1, return_all = False):
Compute the set of sensitivity matrices required to identify the mis-registrations.
%%%%%%%%%%%%%%%% -- INPUTS -- %%%%%%%%%%%%%%%%
_ nameFolder : folder to store the sensitivity matrices.
_ nameSystem : name of the AO system considered. For instance 'ELT_96x96_R_band'
_ tel : telescope object
_ atm : atmosphere object
_ ngs : source object
_ dm_0 : deformable mirror with reference configuration of mis-registrations
_ pitch : pitch of the dm in [m]
_ wfs : wfs object
_ basis : basis to use to compute the sensitivity matrices. Basis should be an object with the following fields:
basis.modes : [nActuator x nModes] matrix containing the commands to apply the modal basis on the dm
basis.indexModes : indexes of the modes considered in the basis. This is used to name the sensitivity matrices
basis.extra : extra name to name the sensitivity matrices for instance 'KL'
_ precision : precision to round the parameter estimation. Equivalent to np.round(misReg_estimation,precision)
_ gainEstimation : gain to apply after one estimation. eventually allows to avoid overshoots.
_ return_all : if true, returns all the estimations at every step of the algorithm
_ misRegistrationZeroPoint : mis-registration around which you want to compute the sensitivity matrices
_ epsilonMisRegistration : epsilon value to apply
_ param : dictionary used as parameter file
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The function returns the meta-sensitivity matrix that contains all the individual sensitivity matrices reshaped as a vector and concatenated.
%%%%%%%%%%%%%%%% -- OUTPUTS -- %%%%%%%%%%%%%%%%
_ misRegistration_out : mis-registration object corresponding to the convergence value
_ scalingFactor_values : scaling factor (for each mode of the modal basis) at each iteration, used to account for possible gain variations between data and model
_ misRegistration_values : mis-registration values for each iteration
"""
def estimateMisRegistration(nameFolder, nameSystem, tel, atm, ngs, dm_0, wfs, basis, calib_in, misRegistrationZeroPoint, epsilonMisRegistration, param, precision = 3, gainEstimation = 1, sensitivity_matrices = None, return_all = False, fast = False, wfs_mis_registrated = None, nIteration = 3):
#%% ---------- LOAD/COMPUTE SENSITIVITY MATRICES --------------------
# compute the sensitivity matrices. if the data already exits, the files will be loaded
# WARNING: The data are loaded only if the name of the requeste files matches the ones in argument of this function.
# make sure that these files are well corresponding to the system you are working with.
if sensitivity_matrices is None:
[metaMatrix,calib_0] = computeMetaSensitivityMatrix(nameFolder = nameFolder,\
nameSystem = nameSystem,\
tel = tel,\
atm = atm,\
ngs = ngs,\
dm_0 = dm_0,\
pitch = dm_0.pitch,\
wfs = wfs,\
basis = basis,\
misRegistrationZeroPoint = misRegistrationZeroPoint,\
epsilonMisRegistration = epsilonMisRegistration,\
param = param,\
wfs_mis_registrated = wfs_mis_registrated)
else:
metaMatrix = sensitivity_matrices
#%% ---------- ITERATIVE ESTIMATION OF THE PARAMETERS --------------------
stroke = 1e-12
criteria = 0
n_mis_reg = metaMatrix.M.shape[0]
misRegEstBuffer = np.zeros(n_mis_reg)
scalingFactor_values = [1]
misRegistration_values = [np.zeros(n_mis_reg)]
epsilonMisRegistration_field = ['shiftX','shiftY','rotationAngle','radialScaling','tangentialScaling']
i=0
tel.isPaired = False
misRegistration_out = MisRegistration(misRegistrationZeroPoint)
if fast:
from AO_modules.calibration.InteractionMatrix import interactionMatrixFromPhaseScreen
dm_0.coefs = np.squeeze(basis.modes)
tel*dm_0
input_modes_0 = dm_0.OPD
input_modes_cp = input_modes_0.copy()
while criteria ==0:
i=i+1
# temporary deformable mirror
if np.ndim(input_modes_0)==2:
if wfs_mis_registrated is not None:
misRegistration_wfs = MisRegistration()
misRegistration_wfs.shiftX = misRegistration_out.shiftX
misRegistration_wfs.shiftY = misRegistration_out.shiftY
misRegistration_dm = MisRegistration()
misRegistration_dm.rotationAngle = misRegistration_out.rotationAngle
apply_shift_wfs(wfs, misRegistration_wfs.shiftX / (wfs.nSubap/wfs.telescope.D), misRegistration_wfs.shiftY/ (wfs.nSubap/wfs.telescope.D))
input_modes_cp = tel.pupil*apply_mis_reg(tel,input_modes_0, misRegistration_dm)
else:
input_modes_cp = tel.pupil*apply_mis_reg(tel,input_modes_0, misRegistration_out)
else:
for i_modes in range(input_modes_0.shape[2]):
if wfs_mis_registrated is not None:
misRegistration_wfs = MisRegistration()
misRegistration_wfs.shiftX = misRegistration_out.shiftX
misRegistration_wfs.shiftY = misRegistration_out.shiftY
misRegistration_dm = MisRegistration()
misRegistration_dm.rotationAngle = misRegistration_out.rotationAngle
apply_shift_wfs(wfs, misRegistration_wfs.shiftX / (wfs.nSubap/wfs.telescope.D), misRegistration_wfs.shiftY/ (wfs.nSubap/wfs.telescope.D))
input_modes_cp[:,:,i_modes] = tel.pupil*apply_mis_reg(tel,input_modes_0[:,:,i_modes], misRegistration_dm)
else:
input_modes_cp[:,:,i_modes] = tel.pupil*apply_mis_reg(tel,input_modes_0[:,:,i_modes], misRegistration_out)
# temporary interaction matrix
calib_tmp = interactionMatrixFromPhaseScreen(ngs,atm,tel,wfs,input_modes_cp,stroke,phaseOffset=0,nMeasurements=50,invert=False,print_time=False)
# temporary scaling factor
try:
scalingFactor_tmp = np.round(np.diag(calib_tmp.D.T@calib_in.D)/ np.diag(calib_tmp.D.T@calib_tmp.D),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.reshape( [email protected](1/scalingFactor_tmp) - calib_tmp.D ,calib_in.D.shape[0]*calib_in.D.shape[1]))
except:
scalingFactor_tmp = np.round(np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_in.D))/ np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_tmp.D)),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.squeeze((np.squeeze(calib_in.D)*(1/scalingFactor_tmp)) - np.squeeze(calib_tmp.D)))
# cumulative mis-registration
misRegEstBuffer+= np.round(misReg_tmp,precision)
# define the next working point to adjust the scaling factor
for i_mis_reg in range(n_mis_reg):
setattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg], getattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg]) + np.round(misReg_tmp[i_mis_reg],precision))
# save the data for each iteration
scalingFactor_values.append(np.copy(scalingFactor_tmp))
misRegistration_values.append(np.copy(misRegEstBuffer))
if i==nIteration:
criteria =1
else:
while criteria ==0:
i=i+1
# temporary deformable mirror
dm_tmp = applyMisRegistration(tel,misRegistration_out,param, wfs = wfs_mis_registrated,print_dm_properties=False,floating_precision=dm_0.floating_precision)
# temporary interaction matrix
calib_tmp = interactionMatrix(ngs,atm,tel,dm_tmp,wfs,basis.modes,stroke,phaseOffset=0,nMeasurements=50,invert=False,print_time=False)
# erase dm_tmp to free memory
del dm_tmp
# temporary scaling factor
try:
scalingFactor_tmp = np.round(np.diag(calib_tmp.D.T@calib_in.D)/ np.diag(calib_tmp.D.T@calib_tmp.D),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.reshape( [email protected](1/scalingFactor_tmp) - calib_tmp.D ,calib_in.D.shape[0]*calib_in.D.shape[1]))
except:
scalingFactor_tmp = np.round(np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_in.D))/ np.sum(np.squeeze(calib_tmp.D)*np.squeeze(calib_tmp.D)),precision)
# temporary mis-registration
misReg_tmp = gainEstimation*np.matmul(metaMatrix.M,np.squeeze((np.squeeze(calib_in.D)*(1/scalingFactor_tmp)) - np.squeeze(calib_tmp.D)))
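# (Hedged completion, not in the original snippet: it mirrors the `fast`
# branch above — accumulate the estimate, move the working point, and store
# the per-iteration values until nIteration is reached.)
# cumulative mis-registration
misRegEstBuffer += np.round(misReg_tmp, precision)
# define the next working point to adjust the scaling factor
for i_mis_reg in range(n_mis_reg):
setattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg], getattr(misRegistration_out, epsilonMisRegistration_field[i_mis_reg]) + np.round(misReg_tmp[i_mis_reg], precision))
# save the data for each iteration
scalingFactor_values.append(np.copy(scalingFactor_tmp))
misRegistration_values.append(np.copy(misRegEstBuffer))
if i == nIteration:
criteria = 1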
# -*- coding: utf-8 -*-
import argparse
import os
import os.path as osp
import numpy as np
import pandas as pd
# import sys
# cur_dir = osp.dirname(osp.abspath(__file__))
# sys.path.insert(0, osp.join(cur_dir, ".."))
def load_predicted_csv(fname):
df = pd.read_csv(fname)
info_list = df.to_dict("records")
return info_list
def parse_Rt_in_csv(item):
return np.array([float(i) for i in item.split(" ")])
def write_result(fname, results):
with open(fname, "w") as f:
f.write("scene_id,im_id,obj_id,score,R,t,time\n")
for item in results:
f.write("{:d},".format(item["scene_id"]))
f.write("{:d},".format(item["im_id"]))
f.write("{:d},".format(item["obj_id"]))
f.write("{:f},".format(item["score"]))
if isinstance(item["R"], np.ndarray):
R_list = item["R"].flatten().tolist()
else:
R_list = item["R"]
for i, r in enumerate(R_list):
sup = " " if i != 8 else ", "
f.write("{:f}{}".format(r, sup))
if isinstance(item["t"], np.ndarray):
t_list = item["t"].flatten().tolist()
else:
t_list = item["t"]
for i, t_item in enumerate(t_list):
sup = " " if i != 2 else ", "
f.write("{:f}{}".format(t_item, sup))
f.write("{:f}\n".format(item["time"]))
def main():
parser = argparse.ArgumentParser(description="Process time of the bop results file")
parser.add_argument("path", help="path to the bop results csv file")
args = parser.parse_args()
print("input file: ", args.path)
assert osp.exists(args.path), args.path
assert args.path.endswith(".csv"), args.path
results = load_predicted_csv(args.path)
# backup old file
os.system(f"cp -v {args.path} {args.path.replace('.csv', '.bak.csv')}")
# process time
times = {}
for item in results:
im_key = "{}/{}".format(item["scene_id"], item["im_id"])
if im_key not in times:
times[im_key] = []
times[im_key].append(item["time"])
for item in results:
im_key = "{}/{}".format(item["scene_id"], item["im_id"])
item["time"] = float( | np.max(times[im_key]) | numpy.max |
import unittest
from spn.algorithms.Inference import likelihood, log_likelihood
from spn.structure.Base import Context
from spn.structure.StatisticalTypes import MetaType
from spn.structure.leaves.histogram.Histograms import create_histogram_leaf
from spn.structure.leaves.histogram.Inference import add_histogram_inference_support
from spn.structure.leaves.parametric.Parametric import *
import numpy as np
from spn.structure.leaves.piecewise.Inference import add_piecewise_inference_support
from spn.structure.leaves.piecewise.PiecewiseLinear import create_piecewise_leaf
class TestPWL(unittest.TestCase):
def test_PWL_no_variance(self):
data = np.array([1.0, 1.0])
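# (Hedged sketch of how this degenerate-data test might continue, inferred
# only from the imports above; the exact SPFlow signatures, the scope
# argument, and the assertion are assumptions.)
add_piecewise_inference_support()
samples = data.reshape(-1, 1)
ds_context = Context(meta_types=[MetaType.REAL])
ds_context.add_domains(samples)
pwl = create_piecewise_leaf(samples, ds_context, scope=[0], prior_weight=None)
ll = log_likelihood(pwl, samples)
self.assertTrue(np.all(np.isfinite(ll)))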
import HubbardModelTools as hm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy import interpolate
import scipy.linalg as sl
from scipy.signal import find_peaks
def c(s, i):
lst = list(s)
if(lst[i]=='0'): raise Exception("Error: passing a state annihilated by c")
lst[i] = '0'
return ''.join(lst)
def cdag(s, i):
lst = list(s)
if(lst[i]=='1'): raise Exception(r"Error: passing a state annihilated by c^\dagger")
lst[i] = '1'
return ''.join(lst)
#C_q
def c_q_up(basis,basis_minus,state,qx,k):
len_RepQx_minus = len(basis_minus.RepQx)
RepQxToIndex_minus = dict(zip(list(map(str,basis_minus.RepQx)), np.arange(0, len_RepQx_minus)))
components = np.zeros(len_RepQx_minus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
Up_state = np.binary_repr(rep[0], width = basis.L)
for i in np.arange(0,basis.L):
if(Up_state[i] == '1'):
NewUpInt = int(c(Up_state,i), 2)
Swapped_rep, j_x, sign, info = basis_minus.check_rep(NewUpInt, rep[1])
sign = sign*(-1)**(np.binary_repr(NewUpInt,width = basis.L)[:i].count('1')+np.binary_repr(rep[1],width = basis.L)[:i].count('1'))
if(info):
Index_Swapped_rep = RepQxToIndex_minus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_minus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
return components/np.linalg.norm(components)
def c_q_down(basis,basis_minus,state,qx,k):
len_RepQx_minus = len(basis_minus.RepQx)
RepQxToIndex_minus = dict(zip(list(map(str,basis_minus.RepQx)), np.arange(0, len_RepQx_minus)))
components = np.zeros(len_RepQx_minus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
Down_state = np.binary_repr(rep[1], width = basis.L)
for i in np.arange(0,basis.L):
if(Down_state[i] == '1'):
NewDownInt = int(c(Down_state,i), 2)
Swapped_rep, j_x, sign, info = basis_minus.check_rep(rep[0], NewDownInt)
sign = sign*(-1)**(np.binary_repr(NewDownInt,width = basis.L)[:i].count('1')+np.binary_repr(rep[0],width = basis.L)[:i].count('1'))
if(info):
Index_Swapped_rep = RepQxToIndex_minus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_minus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
return components/np.linalg.norm(components)
#C^dagger_q
def cdag_q_up(basis,basis_plus,state,qx,k):
len_RepQx_plus = len(basis_plus.RepQx)
RepQxToIndex_plus = dict(zip(list(map(str,basis_plus.RepQx)), np.arange(0, len_RepQx_plus)))
components = np.zeros(len_RepQx_plus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
Up_state = np.binary_repr(rep[0], width = basis.L)
for i in np.arange(0,basis.L):
if(Up_state[i] == '0'):
NewUpInt = int(cdag(Up_state,i), 2)
Swapped_rep, j_x, sign, info = basis_plus.check_rep(NewUpInt, rep[1])
sign = sign*(-1)**(np.binary_repr(NewUpInt,width = basis.L)[:i].count('1')+np.binary_repr(rep[1],width = basis.L)[:i].count('1'))
if(info):
Index_Swapped_rep = RepQxToIndex_plus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_plus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
return components/np.linalg.norm(components)
def cdag_q_down(basis,basis_plus,state,qx,k):
len_RepQx_plus = len(basis_plus.RepQx)
RepQxToIndex_plus = dict(zip(list(map(str,basis_plus.RepQx)), np.arange(0, len_RepQx_plus)))
components = np.zeros(len_RepQx_plus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
Down_state = np.binary_repr(rep[1], width = basis.L)
for i in np.arange(0,basis.L):
if(Down_state[i] == '1'):
NewDownInt = int(c(Down_state,i), 2)
Swapped_rep, j_x, sign, info = basis_plus.check_rep(rep[0], NewDownInt)
sign = sign*(-1)**(np.binary_repr(NewDownInt,width = basis.L)[:i].count('1')+np.binary_repr(rep[0],width = basis.L)[:i].count('1'))
if(info):
Index_Swapped_rep = RepQxToIndex_plus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_plus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
return components/np.linalg.norm(components)
def n_q(basis,basis_minus,state,k,qx):
len_RepQx_minus = len(basis_minus.RepQx)
RepQxToIndex_minus = dict(zip(list(map(str,basis_minus.RepQx)), np.arange(0, len_RepQx_minus)))
components = np.zeros(len_RepQx_minus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
if( not( str(rep) in RepQxToIndex_minus)): continue
Index_n_rep = RepQxToIndex_minus[str(rep)]
Up_state = np.binary_repr(rep[0], width = basis.L)
Down_state = np.binary_repr(rep[1], width = basis.L)
for j in np.arange(0,basis.L):
#By keeping only up/down one gets the operator for only up/down densities
Nup = int(Up_state[j])
Ndown = int(Down_state[j])
components[Index_n_rep] += state[Index_rep]*(Nup+Ndown)*np.exp(-1j*qx*j)*basis_minus.NormRepQx[Index_n_rep]/basis.NormRepQx[Index_rep]
return components/np.linalg.norm(components)
# Current <jG^-1j>
# j_x = c^\dagger_i *( c_{i-1} - c_{i+1})
# j_x = c^dagger_i c_{i-1} - c^\dagger_i c_{i+1}
# i-1 ----> i +
# i <---- i+1 -
# j_q = \sum_{n} e^{iqn} j_n
def j_q_up(basis,basis_minus,state,k,qx):
len_RepQx_minus = len(basis_minus.RepQx)
RepQxToIndex_minus = dict(zip(list(map(str,basis_minus.RepQx)), np.arange(0, len_RepQx_minus)))
components = np.zeros(len_RepQx_minus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
Up_state = np.binary_repr(rep[0], width = basis.L)
for i in np.arange(0,basis.L):
iprev = (i+1)%basis.L
inext = (i-1)%basis.L
if(Up_state[i] == '1'): continue
# Right hop ___ c^\dagger_i c_{i-1}
if(Up_state[iprev]=='1'):
NewUpInt = int( cdag(c(Up_state,iprev), i), 2)
Swapped_rep, j_x, sign, info = basis_minus.check_rep(NewUpInt, rep[1])
if(i==0):
sign = sign*(-1)**(basis.N+1)
# else: no boundary sign is picked up
if(info):
Index_Swapped_rep = RepQxToIndex_minus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += 1j*sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_minus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
# Left hop ___ -c^\dagger_i c_{i+1}
if(Up_state[inext]=='1'):
NewUpInt = int( cdag(c(Up_state,inext), i), 2)
Swapped_rep, j_x, sign, info = basis_minus.check_rep(NewUpInt, rep[1])
if(i== (basis.L-1)):
sign = sign*(-1)**(basis.N+1)
# else: no boundary sign is picked up
if(info):
Index_Swapped_rep = RepQxToIndex_minus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += 1j*sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_minus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
norm = np.linalg.norm(components)
return components/norm, norm
def j_q_down(basis,basis_minus,state,k,qx):
len_RepQx_minus = len(basis_minus.RepQx)
RepQxToIndex_minus = dict(zip(list(map(str,basis_minus.RepQx)), np.arange(0, len_RepQx_minus)))
components = np.zeros(len_RepQx_minus, dtype = np.complex128)
for Index_rep, rep in enumerate(basis.RepQx):
if (np.abs(state[Index_rep])<10**-15): continue
Down_state = np.binary_repr(rep[1], width = basis.L)
for i in np.arange(0,basis.L):
iprev = (i+1)%basis.L
inext = (i-1)%basis.L
if(Down_state[i] == '1'): continue
# Right hop ___ c^\dagger_i c_{i-1}
if(Down_state[iprev]=='1'):
NewDownInt = int( cdag(c(Down_state,iprev), i), 2)
Swapped_rep, j_x, sign, info = basis_minus.check_rep(rep[0], NewDownInt)
if(i==0):
sign = sign*(-1)**(basis.N+1)
# else: no boundary sign is picked up
if(info):
Index_Swapped_rep = RepQxToIndex_minus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += 1j*sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_minus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
# Left hop ___ -c^\dagger_i c_{i+1}
if(Down_state[inext]=='1'):
NewDownInt = int( cdag(c(Down_state,inext), i), 2)
Swapped_rep, j_x, sign, info = basis_minus.check_rep(rep[0], NewDownInt)
if(i==(basis.L -1)):
sign = sign*(-1)**(basis.N+1)
# else: no boundary sign is picked up
if(info):
Index_Swapped_rep = RepQxToIndex_minus[str(Swapped_rep[0])]
components[Index_Swapped_rep] += 1j*sign*np.exp( 1j*(j_x*(k-qx)-qx*i) )*\
state[Index_rep]*basis_minus.NormRepQx[Index_Swapped_rep]/basis.NormRepQx[Index_rep]
norm = np.linalg.norm(components)
return components/norm, norm
hf = hm.FermionicBasis_1d(6, 6, 12)
#For C_q
#hf_minus = hm.FermionicBasis_1d(3, 4, 8)
#For N_q
hf_minus = hm.FermionicBasis_1d(6, 6, 12)
#Better check those before every run
for ijk,U in enumerate(np.linspace(6,-12,1,endpoint=False)):
k = np.pi
H = hm.H_Qx(hf,k,U)
dimH = H.shape[0]
v0 = np.random.random(dimH)
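# (Hedged continuation, not in the original: the usual next step is a
# Lanczos diagonalisation of H seeded with v0; the solver choice and its
# arguments are assumptions.)
from scipy.sparse.linalg import eigsh
E0, V0 = eigsh(H, k=1, which='SA', v0=v0)
print(f"U = {U:.3f}, E0 = {E0[0]:.6f}")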
"""
``FitFunction`` classes designed to assist in curve fitting of swept Langmuir
traces.
"""
__all__ = [
"AbstractFitFunction",
"Exponential",
"ExponentialPlusLinear",
"ExponentialPlusOffset",
"Linear",
]
import numbers
import numpy as np
from abc import ABC, abstractmethod
from collections import namedtuple
from scipy.optimize import curve_fit, fsolve
from scipy.stats import linregress
from typing import Optional, Tuple, Union
from warnings import warn
from plasmapy.utils.decorators import modify_docstring
#: Named tuple for :meth:`AbstractFitFunction.root_solve`.
_RootResults = namedtuple("RootResults", ("root", "err"))
class AbstractFitFunction(ABC):
"""
Abstract class for defining fit functions :math:`f(x)` and the tools for
fitting the function to a set of data.
"""
_param_names = NotImplemented # type: Tuple[str, ...]
def __init__(
self,
params: Tuple[float, ...] = None,
param_errors: Tuple[float, ...] = None,
):
"""
Parameters
----------
params: Tuple[float, ...], optional
Tuple of values for the function parameters. Equal in size to
:attr:`param_names`.
param_errors: Tuple[float, ...], optional
Tuple of values for the errors associated with the function
parameters. Equal in size to :attr:`param_names`.
"""
self._FitParamTuple = namedtuple("FitParamTuple", self._param_names)
if params is None:
self._params = None
else:
self.params = params
if param_errors is None:
self._param_errors = None
else:
self.param_errors = param_errors
self._curve_fit_results = None
self._rsq = None
def __call__(self, x, x_err=None, reterr=False):
"""
Direct call of the fit function :math:`f(x)`.
Parameters
----------
x: array_like
Dependent variables.
x_err: array_like, optional
Errors associated with the independent variables ``x``. Must be of
size one or equal to the size of ``x``.
reterr: bool, optional
(Default: `False`) If `True`, return an array of uncertainties
associated with the calculated independent variables
Returns
-------
y: `numpy.ndarray`
Corresponding dependent variables :math:`y=f(x)` of the independent
variables ``x``.
y_err: `numpy.ndarray`
Uncertainties associated with the calculated dependent variables
:math:`\\delta y`
"""
if reterr:
y_err, y = self.func_err(x, x_err=x_err, rety=True)
return y, y_err
y = self.func(x, *self.params)
return y
def __repr__(self):
return f"{self.__str__()} {self.__class__}"
@abstractmethod
def __str__(self):
...
@abstractmethod
def func(self, x, *args):
"""
The fit function. This signature of the function must first take the
independent variable followed by the parameters to be fitted as
separate arguments.
Parameters
----------
x: array_like
Independent variables to be passed to the fit function.
*args: Tuple[Union[float, int],...]
The parameters that will be adjusted to make the fit.
Returns
-------
`numpy.ndarray`:
The calculated dependent variables of the independent variables ``x``.
Notes
-----
* When sub-classing the definition should look something like::
def func(self, x, a, b, c):
x = self._check_x(x)
self._check_params(a, b, c)
return a * x ** 2 + b * x + c
"""
...
@abstractmethod
@modify_docstring(
prepend="""
Calculate dependent variable uncertainties :math:`\\delta y` for
dependent variables :math:`y=f(x)`.
""",
append="""
* When sub-classing the definition should look something like::
@modify_docstring(append=AbstractFitFunction.func_err.__original_doc__)
def func_err(self, x, x_err=None, rety=False):
'''
A simple docstring giving the equation for error propagation, but
excluding the parameter descriptions. The @modify_docstring
decorator will append the docstring from the parent class.
'''
x, x_err = self._check_func_err_params(x, x_err)
a, b, c = self.params
a_err, b_err, c_err = self.param_errors
# calculate error
if rety:
y = self.func(x, a, b, c)
return err, y
return err
""",
)
def func_err(self, x, x_err=None, rety=False):
"""
Parameters
----------
x: array_like
Independent variables to be passed to the fit function.
x_err: array_like, optional
Errors associated with the independent variables ``x``. Must be of
size one or equal to the size of ``x``.
rety: bool
Set `True` to also return the associated dependent variables
:math:`y = f(x)`.
Returns
-------
err: `numpy.ndarray`
The calculated uncertainties :math:`\\delta y` of the dependent
variables (:math:`y = f(x)`) of the independent variables ``x``.
y: `numpy.ndarray`, optional
(if ``rety == True``) The associated dependent variables
:math:`y = f(x)`.
Notes
-----
* A good reference for formulating propagation of uncertainty expressions is:
<NAME>. *An Introduction to Error Analysis: The Study of
Uncertainties in Physical Measurements.* University Science Books,
second edition, August 1996 (ISBN: 093570275X)
"""
...
@property
def curve_fit_results(self):
"""
The results returned by the curve fitting routine used by
:attr:`curve_fit`. This is typically from `scipy.stats.linregress` or
`scipy.optimize.curve_fit`.
"""
return self._curve_fit_results
@property
def FitParamTuple(self):
"""
A `~collections.namedtuple` used for attributes :attr:`params` and
:attr:`param_errors`. The attribute :attr:`param_names` defines
the tuple field names.
"""
return self._FitParamTuple
@property
def params(self) -> Optional[tuple]:
"""The fitted parameters for the fit function."""
if self._params is None:
return self._params
else:
return self.FitParamTuple(*self._params)
@params.setter
def params(self, val) -> None:
if isinstance(val, self.FitParamTuple) or (
isinstance(val, (tuple, list))
and len(val) == len(self.param_names)
and all(isinstance(vv, numbers.Real) for vv in val)
):
self._params = tuple(val)
else:
raise ValueError(
f"Got {val} for 'val', expecting tuple of ints and "
f"floats of length {len(self.param_names)}."
)
@property
def param_errors(self) -> Optional[tuple]:
"""The associated errors of the fitted :attr:`params`."""
if self._param_errors is None:
return self._param_errors
else:
return self.FitParamTuple(*self._param_errors)
@param_errors.setter
def param_errors(self, val) -> None:
if isinstance(val, self.FitParamTuple) or (
isinstance(val, (tuple, list))
and len(val) == len(self.param_names)
and all(isinstance(vv, numbers.Real) for vv in val)
):
self._param_errors = tuple(val)
else:
raise ValueError(
f"Got {val} for 'val', expecting tuple of ints and "
f"floats of length {len(self.param_names)}."
)
@property
def param_names(self) -> Tuple[str, ...]:
"""Names of the fitted parameters."""
return self._param_names
@property
@abstractmethod
def latex_str(self) -> str:
"""LaTeX friendly representation of the fit function."""
...
def _check_func_err_params(self, x, x_err):
"""Check the ``x`` and ``x_err`` parameters for :meth:`func_err`."""
x = self._check_x(x)
if x_err is not None:
x_err = self._check_x(x_err)
if x_err.shape == ():
pass
elif x_err.shape != x.shape:
raise ValueError(
f"x_err shape {x_err.shape} must be equal the shape of "
f"x {x.shape}."
)
return x, x_err
@staticmethod
def _check_params(*args) -> None:
"""
Check fitting parameters so that they are an expected type for the
class functionality.
"""
for arg in args:
if not isinstance(arg, numbers.Real):
raise TypeError(
f"Expected int or float for parameter argument, got "
f"{type(arg)}."
)
@staticmethod
def _check_x(x):
"""
Check the independent variable ``x`` so that it is an expected
type for the class functionality.
"""
if isinstance(x, numbers.Real):
x = np.array(x)
else:
if not isinstance(x, np.ndarray):
x = np.array(x)
if not (
np.issubdtype(x.dtype, np.integer)
or np.issubdtype(x.dtype, np.floating)
):
raise TypeError(
"Argument x needs to be an array_like object of integers "
"or floats."
)
x = x.squeeze()
if x.shape == ():
# force x to be a scalar
x = x[()]
return x
def root_solve(self, x0):
"""
Solve for the root of the fit function (i.e. :math:`f(x_r) = 0`). This
method used `scipy.optimize.fsolve` to find the function roots.
Parameters
----------
x0: `~numpy.ndarray`
The starting estimate for the roots of :math:`f(x_r) = 0`.
Returns
-------
x : `~numpy.ndarray`
The solution (or the result of the last iteration for an
unsuccessful call).
x_err: `~numpy.ndarray`
The uncertainty associated with the root calculation. **Currently
this returns an array of** `numpy.nan` **values equal in shape to**
``x`` **, since there is no determined way to calculate the
uncertainties.**
Notes
-----
If the full output of `scipy.optimize.fsolve` is desired then one can do:
>>> func = Linear()
>>> func.params = (1.0, 5.0)
>>> func.param_errors = (0.0, 0.0)
>>> roots = fsolve(func, -4.0, full_output=True)
>>> roots
(array([-5.]),
{'nfev': 4,
'fjac': array([[-1.]]),
'r': array([-1.]),
'qtf': array([2.18...e-12]),
'fvec': 0.0},
1,
'The solution converged.')
"""
results = fsolve(self.func, x0, args=self.params)
root = np.squeeze(results[0])
err = np.tile(np.nan, root.shape)
if root.shape == ():
# force x to be a scalar
root = root[()]
err = np.nan
return _RootResults(root, err)
@property
def rsq(self):
"""
Coefficient of determination (r-squared) value of the fit.
.. math::
r^2 &= 1 - \\frac{SS_{res}}{SS_{tot}}
SS_{res} &= \\sum\\limits_{i} (y_i - f(x_i))^2
SS_{tot} &= \\sum\\limits_{i} (y_i - \\bar{y})^2
where :math:`(x_i, y_i)` are the sample data pairs, :math:`f(x_i)` is
the fitted dependent variable corresponding to :math:`x_i`, and
:math:`\\bar{y}` is the average of the :math:`y_i` values.
The :math:`r^2` value is an indicator of how close the points
:math:`(x_i, y_i)` lie to the model :math:`f(x)`. :math:`r^2` values
range between 0 and 1. Values close to 0 indicate that the points
are uncorrelated and have little tendency to lie close to the model,
whereas, values close to 1 indicate a high correlation to the model.
"""
return self._rsq
def curve_fit(self, xdata, ydata, **kwargs) -> None:
"""
Use a non-linear least squares method to fit the fit function to
(``xdata``, ``ydata``), using `scipy.optimize.curve_fit`. This will set
the attributes :attr:`params`, :attr:`param_errors`, and
:attr:`rsq`.
The results of `scipy.optimize.curve_fit` can be obtained via
:attr:`curve_fit_results`.
Parameters
----------
xdata: array_like
The independent variable where data is measured. Should be 1D of
length M.
ydata: array_like
The dependent data associated with ``xdata``.
**kwargs
Any keywords accepted by `scipy.optimize.curve_fit`.
Raises
------
ValueError
if either ``ydata`` or ``xdata`` contain `numpy.nan`'s, or if
incompatible options are used.
RuntimeError
if the least-squares minimization fails.
~scipy.optimize.OptimizeWarning
if covariance of the parameters can not be estimated.
"""
popt, pcov = curve_fit(self.func, xdata, ydata, **kwargs)
self._curve_fit_results = (popt, pcov)
self.params = tuple(popt.tolist())
self.param_errors = tuple(np.sqrt(np.diag(pcov)).tolist())
# calc rsq
# rsq = 1 - (ss_res / ss_tot)
residuals = ydata - self.func(xdata, *self.params)
ss_res = np.sum(residuals ** 2)
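# (Hedged completion following the r-squared definition in the `rsq`
# docstring above; not part of the original snippet.)
ss_tot = np.sum((ydata - np.mean(ydata)) ** 2)
self._rsq = 1 - (ss_res / ss_tot)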
import numpy as np
from .base_element import BaseElement
class Sphere(BaseElement):
def __init__(self, object_index, medium_index, fl_brightness,
center, radius):
"""Sphere element
Parameters
----------
object_index: float
Refractive index of the element
medium_index: float
Refractive index of surrounding medium
fl_brightness: float
Fluorescence brightness
center: list-like
Center coordinates (x, y, z) of the sphere [m]
radius:
Radius of the sphere [m]
"""
#: radius of the sphere
self.radius = radius
points = np.atleast_2d(center)
super(Sphere, self).__init__(object_index=object_index,
medium_index=medium_index,
fl_brightness=fl_brightness,
points=points)
@property
def center(self):
return self.points[0]
def draw(self, grid_size, pixel_size):
ri = np.ones(grid_size, dtype=float)
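# (Hedged sketch of the remainder of draw(); the grid conventions and the
# return value are assumptions, not from the original.)
ri *= self.medium_index
cx, cy, cz = self.center / pixel_size
x, y, z = np.meshgrid(*[np.arange(s) for s in grid_size], indexing="ij")
inside = (x - cx) ** 2 + (y - cy) ** 2 + (z - cz) ** 2 <= (self.radius / pixel_size) ** 2
ri[inside] = self.object_index
return ri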
import os, sys, platform, json, operator, sqlite3, io, gzip, zlib, random, pickle, itertools, warnings, multiprocessing, h5py, statistics, inspect, requests, validators
from importlib import reload
from datetime import datetime
from time import sleep
from itertools import permutations # is this being used? or raw python combos? can it just be itertools.permutations?
from textwrap import dedent
from math import floor, log10
import pprint as pp
# OS-agnostic system files.
import appdirs
# ORM.
from peewee import *
from playhouse.sqlite_ext import SqliteExtDatabase, JSONField
from playhouse.fields import PickleField
# ETL.
import pyarrow
from pyarrow import parquet
import pandas as pd
import numpy as np
# Sample prep. Unsupervised learning.
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import *
from sklearn.preprocessing import *
# Deep learning.
import keras
from keras.models import load_model, Sequential
from keras.callbacks import Callback
# Progress bar.
from tqdm import tqdm
# Visualization.
import plotly.express as px
# Images.
from PIL import Image as Imaje
# File sorting.
from natsort import natsorted
# Complex serialization.
import dill as dill
name = "aiqc"
"""
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
- 'fork' makes all variables on main process available to child process. OS attempts not to duplicate all variables.
- 'spawn' requires that variables be passed to child as args, and seems to play by pickle's rules (e.g. no func in func).
- In Python 3.8, macOS changed default from 'fork' to 'spawn' , which is how I learned all this.
- Windows does not support 'fork'. It supports 'spawn'. So basically I have to play by spawn/ pickle rules.
- Spawn/ pickle dictates (1) where execute_jobs func is placed, (2) if MetricsCutoff func works, (3) if tqdm output is visible.
- Update: now MetricsCutoff is not working in `fork` mode.
- Wrote the `poll_progress` func for 'spawn' situations.
- If everything hits the fan, `run_jobs(in_background=False)` for a normal for loop.
- Tried `concurrent.futures` but it only works with `.py` from command line.
"""
if (os.name != 'nt'):
# If `force=False`, then `reload(aiqc)` triggers `RuntimeError: context already set`.
multiprocessing.set_start_method('fork', force=True)
#==================================================
# CONFIGURATION
#==================================================
app_dir_no_trailing_slash = appdirs.user_data_dir("aiqc")
# Adds either a trailing slash or backslashes depending on OS.
app_dir = os.path.join(app_dir_no_trailing_slash, '')
default_config_path = app_dir + "config.json"
default_db_path = app_dir + "aiqc.sqlite3"
def check_exists_folder():
# If Windows does not have permission to read the folder, it will fail when trailing backslashes \\ provided.
app_dir_exists = os.path.exists(app_dir_no_trailing_slash)
if app_dir_exists:
print(f"\n=> Success - the following file path already exists on your system:\n{app_dir}\n")
return True
else:
print(
f"=> Info - it appears the following folder does not exist on your system:\n{app_dir}\n\n" \
f"=> Fix - you can attempt to fix this by running `aiqc.create_folder()`.\n"
)
return False
def create_folder():
app_dir_exists = check_exists_folder()
if (app_dir_exists):
print(f"\n=> Info - skipping folder creation as folder already exists at file path:\n{app_dir}\n")
else:
try:
"""
- `makedirs` will create any missing intermediary dir(s) in addition to the target dir.
- Whereas `mkdir` only creates the target dir and fails if intermediary dir(s) are missing.
- If this break for whatever reason, could also try out `path.mkdir(parents=True)`.
"""
os.makedirs(app_dir)
# if os.name == 'nt':
# # Windows: backslashes \ and double backslashes \\
# command = 'mkdir ' + app_dir
# os.system(command)
# else:
# # posix (mac and linux)
# command = 'mkdir -p "' + app_dir + '"'
# os.system(command)
except:
raise OSError(f"\n=> Yikes - Local system failed to execute:\n`os.mkdirs('{app_dir}')\n")
print(
f"=> Success - created folder at file path:\n{app_dir}\n\n" \
f"=> Next run `aiqc.create_config()`.\n"
)
def check_permissions_folder():
app_dir_exists = check_exists_folder()
if (app_dir_exists):
        # Windows `os.access()` always returns True even when I have verified that permissions are in fact denied.
if (os.name == 'nt'):
# Test write.
file_name = "aiqc_test_permissions.txt"
def permissions_fail_info():
# We don't want an error here because it needs to return False.
print(
f"=> Yikes - your operating system user does not have permission to write to file path:\n{app_dir}\n\n" \
f"=> Fix - you can attempt to fix this by running `aiqc.grant_permissions_folder()`.\n"
)
try:
cmd_file_create = 'echo "test" >> ' + app_dir + file_name
write_response = os.system(cmd_file_create)
except:
permissions_fail_info()
return False
if (write_response != 0):
permissions_fail_info()
return False
else:
# Test read.
try:
read_response = os.system("type " + app_dir + file_name)
except:
permissions_fail_info()
return False
if (read_response != 0):
permissions_fail_info()
return False
else:
cmd_file_delete = "erase " + app_dir + file_name
os.system(cmd_file_delete)
print(f"\n=> Success - your operating system user can read from and write to file path:\n{app_dir}\n")
return True
else:
# posix
# https://www.geeksforgeeks.org/python-os-access-method/
readable = os.access(app_dir, os.R_OK)
writeable = os.access(app_dir, os.W_OK)
if (readable and writeable):
print(f"\n=> Success - your operating system user can read from and write to file path:\n{app_dir}\n")
return True
else:
if not readable:
print(f"\n=> Yikes - your operating system user does not have permission to read from file path:\n{app_dir}\n")
if not writeable:
print(f"\n=> Yikes - your operating system user does not have permission to write to file path:\n{app_dir}\n")
if not readable or not writeable:
print("\n=> Fix - you can attempt to fix this by running `aiqc.grant_permissions_folder()`.\n")
return False
else:
return False
def grant_permissions_folder():
permissions = check_permissions_folder()
if (permissions):
print(f"\n=> Info - skipping as you already have permissions to read from and write to file path:\n{app_dir}\n")
else:
try:
if (os.name == 'nt'):
# Windows ICACLS permissions: https://www.educative.io/edpresso/what-is-chmod-in-windows
# Works in Windows Command Prompt and `os.system()`, but not PowerShell.
# Does not work with trailing backslashes \\
command = 'icacls "' + app_dir_no_trailing_slash + '" /grant users:(F) /c'
os.system(command)
elif (os.name != 'nt'):
# posix
command = 'chmod +wr ' + '"' + app_dir + '"'
os.system(command)
except:
print(
f"=> Yikes - error failed to execute this system command:\n{command}\n\n" \
f"===================================\n"
)
raise
permissions = check_permissions_folder()
if permissions:
print(f"\n=> Success - granted system permissions to read and write from file path:\n{app_dir}\n")
else:
print(f"\n=> Yikes - failed to grant system permissions to read and write from file path:\n{app_dir}\n")
def get_config():
aiqc_config_exists = os.path.exists(default_config_path)
if aiqc_config_exists:
with open(default_config_path, 'r') as aiqc_config_file:
aiqc_config = json.load(aiqc_config_file)
return aiqc_config
else:
print("\n=> Welcome to AIQC.\nTo get started, run `aiqc.setup()`.\n")
def create_config():
#check if folder exists
folder_exists = check_exists_folder()
if folder_exists:
config_exists = os.path.exists(default_config_path)
if not config_exists:
aiqc_config = {
"created_at": str(datetime.now())
, "config_path": default_config_path
, "db_path": default_db_path
, "sys.version": sys.version
, "platform.python_implementation()": platform.python_implementation()
, "sys.prefix": sys.prefix
, "os.name": os.name
, "platform.version()": platform.version()
, "platform.java_ver()": platform.java_ver()
, "platform.win32_ver()": platform.win32_ver()
, "platform.libc_ver()": platform.libc_ver()
, "platform.mac_ver()": platform.mac_ver()
}
try:
with open(default_config_path, 'w') as aiqc_config_file:
json.dump(aiqc_config, aiqc_config_file)
except:
print(
f"=> Yikes - failed to create config file at path:\n{default_config_path}\n\n" \
f"=> Fix - you can attempt to fix this by running `aiqc.check_permissions_folder()`.\n" \
f"==================================="
)
raise
print(f"\n=> Success - created config file for settings at path:\n{default_config_path}\n")
reload(sys.modules[__name__])
else:
print(f"\n=> Info - skipping as config file already exists at path:\n{default_config_path}\n")
print("\n=> Next run `aiqc.create_db()`.\n")
def delete_config(confirm:bool=False):
aiqc_config = get_config()
if aiqc_config is None:
print("\n=> Info - skipping as there is no config file to delete.\n")
else:
if confirm:
config_path = aiqc_config['config_path']
try:
os.remove(config_path)
except:
print(
f"=> Yikes - failed to delete config file at path:\n{config_path}\n\n" \
f"===================================\n" \
)
raise
print(f"\n=> Success - deleted config file at path:\n{config_path}\n")
reload(sys.modules[__name__])
else:
print("\n=> Info - skipping deletion because `confirm` arg not set to boolean `True`.\n")
def update_config(kv:dict):
aiqc_config = get_config()
if aiqc_config is None:
print("\n=> Info - there is no config file to update.\n")
else:
for k, v in kv.items():
aiqc_config[k] = v
config_path = aiqc_config['config_path']
try:
with open(config_path, 'w') as aiqc_config_file:
json.dump(aiqc_config, aiqc_config_file)
except:
print(
f"=> Yikes - failed to update config file at path:\n{config_path}\n\n" \
f"===================================\n"
)
raise
print(f"\n=> Success - updated configuration settings:\n{aiqc_config}\n")
reload(sys.modules[__name__])
#==================================================
# DATABASE
#==================================================
def get_path_db():
"""
Originally, this code was in a child directory.
"""
aiqc_config = get_config()
if aiqc_config is None:
# get_config() will print a null condition.
pass
else:
db_path = aiqc_config['db_path']
return db_path
def get_db():
"""
The `BaseModel` of the ORM calls this function.
"""
path = get_path_db()
if path is None:
print("\n=> Info - Cannot fetch database yet because it has not been configured.\n")
else:
db = SqliteExtDatabase(path)
return db
def create_db():
# Future: Could let the user specify their own db name, for import tutorials. Could check if passed as an argument to create_config?
db_path = get_path_db()
db_exists = os.path.exists(db_path)
if db_exists:
print(f"\n=> Skipping database file creation as a database file already exists at path:\n{db_path}\n")
else:
# Create sqlite file for db.
try:
db = get_db()
except:
print(
f"=> Yikes - failed to create database file at path:\n{db_path}\n\n" \
f"===================================\n"
)
raise
print(f"\n=> Success - created database file at path:\n{db_path}\n")
db = get_db()
# Create tables inside db.
tables = db.get_tables()
table_count = len(tables)
if table_count > 0:
print(f"\n=> Info - skipping table creation as the following tables already exist.{tables}\n")
else:
db.create_tables([
File, Tabular, Image,
Dataset,
Label, Featureset,
Splitset, Foldset, Fold,
Encoderset, Labelcoder, Featurecoder,
Algorithm, Hyperparamset, Hyperparamcombo,
Batch, Jobset, Job, Result
])
tables = db.get_tables()
table_count = len(tables)
if table_count > 0:
print(f"\n💾 Success - created all database tables. 💾\n")
else:
print(
f"=> Yikes - failed to create tables.\n" \
f"Please see README file section titled: 'Deleting & Recreating the Database'\n"
)
def destroy_db(confirm:bool=False, rebuild:bool=False):
if (confirm==True):
db_path = get_path_db()
db_exists = os.path.exists(db_path)
if db_exists:
try:
os.remove(db_path)
except:
print(
f"=> Yikes - failed to delete database file at path:\n{db_path}\n\n" \
f"===================================\n"
)
raise
print(f"\n=> Success - deleted database file at path:\n{db_path}\n")
else:
print(f"\n=> Info - there is no file to delete at path:\n{db_path}\n")
reload(sys.modules[__name__])
if (rebuild==True):
create_db()
else:
print("\n=> Info - skipping destruction because `confirm` arg not set to boolean `True`.\n")
def setup():
create_folder()
create_config()
create_db()
#==================================================
# ORM
#==================================================
# --------- HELPER FUNCTIONS ---------
def listify(supposed_lst:object=None):
"""
- When only providing a single element, it's easy to forget to put it inside a list!
"""
if (supposed_lst is not None):
if (not isinstance(supposed_lst, list)):
supposed_lst = [supposed_lst]
# If it was already a list, check it for emptiness and `None`.
elif (isinstance(supposed_lst, list)):
if (not supposed_lst):
raise ValueError(dedent(
f"Yikes - The list you provided contained `None` as an element." \
f"{supposed_lst}"
))
if (None in supposed_lst):
raise ValueError(dedent(
f"Yikes - The list you provided contained `None` as an element." \
f"{supposed_lst}"
))
# Allow `is None` to pass through because we need it to trigger null conditions.
return supposed_lst
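# Behavior examples for `listify` (derived from the logic above):
#   listify('a')        -> ['a']
#   listify(['a', 'b']) -> ['a', 'b']
#   listify(None)       -> None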
def dill_serialize(objekt:object):
blob = io.BytesIO()
dill.dump(objekt, blob)
blob = blob.getvalue()
return blob
def dill_deserialize(blob:bytes):
objekt = io.BytesIO(blob)
objekt = dill.load(objekt)
return objekt
# --------- END HELPERS ---------
class BaseModel(Model):
"""
- Runs when the package is imported. http://docs.peewee-orm.com/en/latest/peewee/models.html
- ORM: by inheritting the BaseModel class, each Model class does not have to set Meta.
"""
class Meta:
database = get_db()
class Dataset(BaseModel):
"""
The sub-classes are not 1-1 tables. They simply provide namespacing for functions
to avoid functions riddled with if statements about dataset_type and null parameters.
"""
dataset_type = CharField() #tabular, image, sequence, graph, audio.
file_count = IntegerField() # only includes file_types that match the dataset_type.
source_path = CharField(null=True)
#s3_path = CharField(null=True) # Write an order to check.
def make_label(id:int, columns:list):
columns = listify(columns)
l = Label.from_dataset(dataset_id=id, columns=columns)
return l
def make_featureset(
id:int
, include_columns:list = None
, exclude_columns:list = None
):
include_columns = listify(include_columns)
exclude_columns = listify(exclude_columns)
f = Featureset.from_dataset(
dataset_id = id
, include_columns = include_columns
, exclude_columns = exclude_columns
)
return f
def to_pandas(id:int, columns:list=None, samples:list=None):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (dataset.dataset_type == 'tabular' or dataset.dataset_type == 'text'):
df = Dataset.Tabular.to_pandas(id=dataset.id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'image'):
raise ValueError("\nYikes - `Dataset.Image` class does not have a `to_pandas()` method.\n")
return df
def to_numpy(id:int, columns:list=None, samples:list=None):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (dataset.dataset_type == 'tabular'):
arr = Dataset.Tabular.to_numpy(id=id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'image'):
if (columns is not None):
raise ValueError("\nYikes - `Dataset.Image.to_numpy` does not accept a `columns` argument.\n")
arr = Dataset.Image.to_numpy(id=id, samples=samples)
return arr
def sorted_file_list(dir_path:str):
if not os.path.exists(dir_path):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(path)`:\n{path}\n")
path = os.path.abspath(dir_path)
if (os.path.isdir(path) == False):
raise ValueError(f"\nYikes - The path that you provided is not a directory:{path}\n")
file_paths = os.listdir(path)
# prune hidden files and directories.
file_paths = [f for f in file_paths if not f.startswith('.')]
        # `os.listdir` returns bare names, so join with the parent folder before the isdir check.
        file_paths = [f for f in file_paths if not os.path.isdir(os.path.join(path, f))]
if not file_paths:
raise ValueError(f"\nYikes - The directory that you provided has no files in it:{path}\n")
# folder path is already absolute
file_paths = [os.path.join(path, f) for f in file_paths]
file_paths = natsorted(file_paths)
return file_paths
class Tabular():
"""
This does not use a subclass e.g. `class Tabular(Dataset):`
because the ORM would make a separate table.
"""
dataset_type = 'tabular'
file_index = 0
file_count = 1
def from_path(
file_path:str
, source_file_format:str
, name:str = None
, dtype:dict = None
, column_names:list = None
, skip_header_rows:int = 'infer'
):
column_names = listify(column_names)
accepted_formats = ['csv', 'tsv', 'parquet']
if source_file_format not in accepted_formats:
raise ValueError(f"\nYikes - Available file formats include csv, tsv, and parquet.\nYour file format: {source_file_format}\n")
if not os.path.exists(file_path):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(file_path)`:\n{file_path}\n")
if not os.path.isfile(file_path):
raise ValueError(dedent(
f"Yikes - The path you provided is a directory according to `os.path.isfile(file_path)`:" \
f"{file_path}"
f"But `dataset_type=='tabular'` only supports a single file, not an entire directory.`"
))
# Use the raw, not absolute path for the name.
if name is None:
name = file_path
source_path = os.path.abspath(file_path)
dataset = Dataset.create(
dataset_type = Dataset.Tabular.dataset_type
, file_count = Dataset.Tabular.file_count
, source_path = source_path
, name = name
)
try:
file = File.Tabular.from_file(
path = file_path
, source_file_format = source_file_format
, dtype = dtype
, column_names = column_names
, skip_header_rows = skip_header_rows
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_pandas(
dataframe:object
, name:str = None
, dtype:dict = None
, column_names:list = None
):
column_names = listify(column_names)
if (type(dataframe).__name__ != 'DataFrame'):
raise ValueError("\nYikes - The `dataframe` you provided is not `type(dataframe).__name__ == 'DataFrame'`\n")
dataset = Dataset.create(
file_count = Dataset.Tabular.file_count
, dataset_type = Dataset.Tabular.dataset_type
, name = name
, source_path = None
)
try:
File.Tabular.from_pandas(
dataframe = dataframe
, dtype = dtype
, column_names = column_names
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_numpy(
ndarray:object
, name:str = None
, dtype:dict = None
, column_names:list = None
):
column_names = listify(column_names)
if (type(ndarray).__name__ != 'ndarray'):
raise ValueError("\nYikes - The `ndarray` you provided is not of the type 'ndarray'.\n")
elif (ndarray.dtype.names is not None):
raise ValueError(dedent("""
Yikes - Sorry, we do not support NumPy Structured Arrays.
However, you can use the `dtype` dict and `columns_names` to handle each column specifically.
"""))
dimensions = len(ndarray.shape)
if (dimensions > 2) or (dimensions < 1):
raise ValueError(dedent(f"""
Yikes - Tabular Datasets only support 1D and 2D arrays.
Your array dimensions had <{dimensions}> dimensions.
"""))
dataset = Dataset.create(
file_count = Dataset.Tabular.file_count
, name = name
, source_path = None
, dataset_type = Dataset.Tabular.dataset_type
)
try:
File.Tabular.from_numpy(
ndarray = ndarray
, dtype = dtype
, column_names = column_names
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_pandas(
id:int
, columns:list = None
, samples:list = None
):
file = Dataset.Tabular.get_main_file(id)
columns = listify(columns)
samples = listify(samples)
df = file.Tabular.to_pandas(id=file.id, samples=samples, columns=columns)
return df
def to_numpy(
id:int
, columns:list = None
, samples:list = None
):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
# This calls the method above. It does not need `.Tabular`
df = dataset.to_pandas(columns=columns, samples=samples)
ndarray = df.to_numpy()
return ndarray
def get_main_file(id:int):
file = File.select().join(Dataset).where(
Dataset.id==id, File.file_type=='tabular', File.file_index==0
)[0]
return file
def get_main_tabular(id:int):
file = Dataset.Tabular.get_main_file(id)
tabular = file.tabulars[0]
return tabular
class Image():
dataset_type = 'image'
def from_folder(
folder_path:str
, name:str = None
, pillow_save:dict = {}
):
if name is None:
name = folder_path
source_path = os.path.abspath(folder_path)
file_paths = Dataset.sorted_file_list(source_path)
file_count = len(file_paths)
dataset = Dataset.create(
file_count = file_count
, name = name
, source_path = source_path
, dataset_type = Dataset.Image.dataset_type
)
#Make sure the shape and mode of each image are the same before writing the Dataset.
sizes = []
modes = []
for i, path in enumerate(tqdm(
file_paths
, desc = "🖼️ Validating Images 🖼️"
, ncols = 85
)):
img = Imaje.open(path)
sizes.append(img.size)
modes.append(img.mode)
if (len(set(sizes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same width and height.
`PIL.Image.size`\nHere are the unique sizes you provided:\n{set(sizes)}
"""))
elif (len(set(modes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same mode aka colorscale.
`PIL.Image.mode`\nHere are the unique modes you provided:\n{set(modes)}
"""))
try:
for i, p in enumerate(tqdm(
file_paths
, desc = "🖼️ Ingesting Images 🖼️"
, ncols = 85
)):
file = File.Image.from_file(
path = p
, pillow_save = pillow_save
, file_index = i
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_urls(
urls:list
, pillow_save:dict = {}
, name:str = None
, source_path:str = None
):
urls = listify(urls)
for u in urls:
validation = validators.url(u)
if (validation != True): #`== False` doesn't work.
raise ValueError(f"\nYikes - Invalid url detected within `urls` list:\n'{u}'\n")
file_count = len(urls)
dataset = Dataset.create(
file_count = file_count
, name = name
, dataset_type = Dataset.Image.dataset_type
, source_path = source_path
)
#Make sure the shape and mode of each image are the same before writing the Dataset.
sizes = []
modes = []
for i, url in enumerate(tqdm(
urls
, desc = "🖼️ Validating Images 🖼️"
, ncols = 85
)):
img = Imaje.open(
requests.get(url, stream=True).raw
)
sizes.append(img.size)
modes.append(img.mode)
if (len(set(sizes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same width and height.
`PIL.Image.size`\nHere are the unique sizes you provided:\n{set(sizes)}
"""))
elif (len(set(modes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same mode aka colorscale.
`PIL.Image.mode`\nHere are the unique modes you provided:\n{set(modes)}
"""))
try:
for i, url in enumerate(tqdm(
urls
, desc = "🖼️ Ingesting Images 🖼️"
, ncols = 85
)):
file = File.Image.from_url(
url = url
, pillow_save = pillow_save
, file_index = i
, dataset_id = dataset.id
)
"""
for i, url in enumerate(urls):
file = File.Image.from_url(
url = url
, pillow_save = pillow_save
, file_index = i
, dataset_id = dataset.id
)
"""
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_pillow(id:int, samples:list=None):
"""
            - This does not have a `columns` attribute because it is only for fetching images.
- Have to fetch as image before feeding into numpy `numpy.array(Image.open())`.
- Future: could return the tabular data along with it.
- Might need this for Preprocess where rotate images and such.
"""
samples = listify(samples)
files = Dataset.Image.get_image_files(id, samples=samples)
images = [f.Image.to_pillow(f.id) for f in files]
return images
def to_numpy(id:int, samples:list=None):
"""
- Because Pillow works directly with numpy, there's no need for pandas right now.
- But downstream methods are using pandas.
"""
samples = listify(samples)
images = Dataset.Image.to_pillow(id, samples=samples)
images = [np.array(img) for img in images]
images = np.array(images)
return images
def get_image_files(id:int, samples:list=None):
samples = listify(samples)
dataset = Dataset.get_by_id(id)
files = File.select().join(Dataset).where(
Dataset.id==id, File.file_type=='image'
).order_by(File.file_index)# Ascending by default.
# Select from list by index.
if (samples is not None):
files = [files[i] for i in samples]
return files
class Text():
dataset_type = 'text'
file_count = 1
column_name = 'TextData'
def from_strings(
strings: list,
name: str = None
):
for expectedString in strings:
                if (type(expectedString) != str):
                    raise ValueError(f'\nYikes - The input contains an object of non-str type: {type(expectedString)}\n')
            # `columns` must be list-like; a bare string would raise inside pandas.
            dataframe = pd.DataFrame(strings, columns=[Dataset.Text.column_name], dtype="string")
return Dataset.Tabular.from_pandas(dataframe, name)
def from_pandas(
dataframe:object,
name:str = None,
dtype:dict = None,
column_names:list = None
):
if Dataset.Text.column_name not in dataframe.columns:
raise ValueError(r'TextData column not found in input df. Please rename the column containing the text data as "TextData"')
return Dataset.Tabular.from_pandas(dataframe, name, dtype, column_names)
def from_folder(
folder_path:str,
name:str = None
):
if name is None:
name = folder_path
source_path = os.path.abspath(folder_path)
input_files = Dataset.sorted_file_list(source_path)
file_count = len(input_files)
files_data = []
for input_file in input_files:
with open(input_file, 'r') as file_pointer:
files_data.extend([file_pointer.read()])
return Dataset.Text.from_strings(files_data, name)
def to_pandas(
id:int,
columns:list = None,
samples:list = None
):
return Dataset.Tabular.to_pandas(id, columns, samples)
def to_strings(
id:int,
samples:list = None
):
data_df = Dataset.Tabular.to_pandas(id, [Dataset.Text.column_name], samples)
return data_df[Dataset.Text.column_name].tolist()
def to_numpy(
id:int,
columns:list = None,
samples:list = None
):
return Dataset.Tabular.to_numpy(id, columns, samples)
# Graph
# node_data is pretty much tabular sequence (varied length) data right down to the columns.
# the only unique thing is an edge_data for each Graph file.
# attach multiple file types to a file File(id=1).tabular, File(id=1).graph?
class File(BaseModel):
"""
- Due to the fact that different types of Files have different attributes
(e.g. File.Tabular columns=JSON or File.Graph nodes=Blob, edges=Blob),
I am making each file type its own subclass and 1-1 table. This approach
allows for the creation of custom File types.
- If `blob=None` then isn't persisted therefore fetch from source_path or s3_path.
- Note that `dtype` does not require every column to be included as a key in the dictionary.
"""
blob = BlobField()
file_type = CharField()
file_format = CharField() # png, jpg, parquet
file_index = IntegerField() # image, sequence, graph
shape = JSONField()# images? could still get shape... graphs node_count and connection_count?
source_path = CharField(null=True)
dataset = ForeignKeyField(Dataset, backref='files')
"""
Classes are much cleaner than a knot of if statements in every method,
and `=None` for every parameter.
"""
class Tabular():
file_type = 'tabular'
file_format = 'parquet'
file_index = 0 # If Sequence needs this in future, just 'if None then 0'.
def from_pandas(
dataframe:object
, dataset_id:int
            , dtype:dict = None # Accepts a single str for the entire df, but ultimately it gets saved as one dtype per column.
, column_names:list = None
, source_path:str = None # passed in via from_file
):
column_names = listify(column_names)
File.Tabular.df_validate(dataframe, column_names)
dataframe, columns, shape, dtype = File.Tabular.df_set_metadata(
dataframe=dataframe, column_names=column_names, dtype=dtype
)
blob = File.Tabular.df_to_compressed_parquet_bytes(dataframe)
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Tabular.file_type
, file_format = File.Tabular.file_format
, file_index = File.Tabular.file_index
, shape = shape
, source_path = source_path
, dataset = dataset
)
try:
tabular = Tabular.create(
columns = columns
, dtypes = dtype
, file_id = file.id
)
except:
file.delete_instance() # Orphaned.
raise
return file
def from_numpy(
ndarray:object
, dataset_id:int
, column_names:list = None
, dtype:dict = None #Or single string.
):
column_names = listify(column_names)
"""
Only supporting homogenous arrays because structured arrays are a pain
when it comes time to convert them to dataframes. It complained about
setting an index, scalar types, and dimensionality... yikes.
Homogenous arrays keep dtype in `arr.dtype==dtype('int64')`
Structured arrays keep column names in `arr.dtype.names==('ID', 'Ring')`
            Per-column dtypes from structured arrays: <https://stackoverflow.com/a/65224410/5739514>
"""
            # `arr_validate` may strip an all-NaN header row, so capture its return value.
            ndarray = File.Tabular.arr_validate(ndarray)
"""
DataFrame method only accepts a single dtype str, or infers if None.
So deferring the dict-based dtype to our `from_pandas()` method.
Also deferring column_names since it runs there anyways.
"""
df = pd.DataFrame(data=ndarray)
file = File.Tabular.from_pandas(
dataframe = df
, dataset_id = dataset_id
, dtype = dtype
, column_names = column_names # Doesn't overwrite first row of homogenous array.
)
return file
def from_file(
path:str
, source_file_format:str
, dataset_id:int
, dtype:dict = None
, column_names:list = None
, skip_header_rows:int = 'infer'
):
column_names = listify(column_names)
df = File.Tabular.path_to_df(
path = path
, source_file_format = source_file_format
, column_names = column_names
, skip_header_rows = skip_header_rows
)
file = File.Tabular.from_pandas(
dataframe = df
, dataset_id = dataset_id
, dtype = dtype
, column_names = None # See docstring above.
, source_path = path
)
return file
def to_pandas(
id:int
, columns:list = None
, samples:list = None
):
f = File.get_by_id(id)
blob = io.BytesIO(f.blob)
columns = listify(columns)
samples = listify(samples)
# Filters.
df = pd.read_parquet(blob, columns=columns)
if samples is not None:
df = df.iloc[samples]
# Accepts dict{'column_name':'dtype_str'} or a single str.
tab = f.tabulars[0]
df_dtypes = tab.dtypes
if (df_dtypes is not None):
if (isinstance(df_dtypes, dict)):
if (columns is None):
columns = tab.columns
# Prunes out the excluded columns from the dtype dict.
df_dtype_cols = list(df_dtypes.keys())
for col in df_dtype_cols:
if (col not in columns):
del df_dtypes[col]
elif (isinstance(df_dtypes, str)):
pass #dtype just gets applied as-is.
df = df.astype(df_dtypes)
return df
def to_numpy(
id:int
, columns:list = None
, samples:list = None
):
"""
This is the only place where to_numpy() relies on to_pandas().
It does so because pandas is good with the parquet and dtypes.
"""
columns = listify(columns)
samples = listify(samples)
df = File.Tabular.to_pandas(id=id, columns=columns, samples=samples)
arr = df.to_numpy()
return arr
#Future: Add to_tensor and from_tensor? Or will numpy suffice?
def pandas_stringify_columns(df, columns):
"""
I don't want both string and int-based column names for when calling columns programmatically,
and more importantly, 'ValueError: parquet must have string column names'
"""
cols_raw = df.columns.to_list()
if columns is None:
# in case the columns were a range of ints.
cols_str = [str(c) for c in cols_raw]
else:
cols_str = columns
# dict from 2 lists
cols_dct = dict(zip(cols_raw, cols_str))
df = df.rename(columns=cols_dct)
columns = df.columns.to_list()
return df, columns
def df_validate(dataframe:object, column_names:list):
if (dataframe.empty):
raise ValueError("\nYikes - The dataframe you provided is empty according to `df.empty`\n")
if (column_names is not None):
col_count = len(column_names)
structure_col_count = dataframe.shape[1]
if col_count != structure_col_count:
raise ValueError(dedent(f"""
Yikes - The dataframe you provided has <{structure_col_count}> columns,
but you provided <{col_count}> columns.
"""))
def df_set_metadata(
dataframe:object
, column_names:list = None
, dtype:dict = None
):
shape = {}
shape['rows'], shape['columns'] = dataframe.shape[0], dataframe.shape[1]
# Passes in user-defined columns in case they are specified.
# Auto-assigned int based columns return a range when `df.columns` called so convert them to str.
dataframe, columns = File.Tabular.pandas_stringify_columns(df=dataframe, columns=column_names)
"""
At this point, user-provided `dtype` can be a dict or a singular string/ class.
But a Pandas dataframe in-memory only has `dtypes` dict not a singular `dtype` str.
"""
if (dtype is not None):
# Accepts dict{'column_name':'dtype_str'} or a single str.
dataframe = dataframe.astype(dtype)
"""
Check if any user-provided dtype against actual dataframe dtypes to see if conversions failed.
Pandas dtype seems robust in comparing dtypes:
Even things like `'double' == dataframe['col_name'].dtype` will pass when `.dtype==np.float64`.
Despite looking complex, category dtype converts to simple 'category' string.
"""
if (not isinstance(dtype, dict)):
# Inspect each column:dtype pair and check to see if it is the same as the user-provided dtype.
actual_dtypes = dataframe.dtypes.to_dict()
                for col_name, typ in actual_dtypes.items():
                    if (typ != dtype):
                        raise ValueError(dedent(f"""
                        Yikes - You specified `dtype={dtype}`,
                        but Pandas did not convert it: `dataframe['{col_name}'].dtype == {typ}`.
                        You can either use a different dtype, or try to set your dtypes prior to ingestion in Pandas.
                        """))
elif (isinstance(dtype, dict)):
for col_name, typ in dtype.items():
if (typ != dataframe[col_name].dtype):
raise ValueError(dedent(f"""
Yikes - You specified `dataframe['{col_name}']:dtype('{typ}'),
but Pandas did not convert it: `dataframe['{col_name}'].dtype == {dataframe[col_name].dtype}`.
You can either use a different dtype, or try to set your dtypes prior to ingestion in Pandas.
"""))
"""
Rare types like np.uint8, np.double, 'bool',
but not np.complex64 and np.float128 (aka np.longfloat)
because `DataFrame.to_parquet(engine='auto')` fails.
            - `StringArray.unique().tolist()` fails because StringArray doesn't have `tolist()`;
              `unique().to_numpy().tolist()` works though.
"""
excluded_types = ['string', 'complex', 'longfloat', 'float128']
actual_dtypes = dataframe.dtypes.to_dict().items()
for col_name, typ in actual_dtypes:
for et in excluded_types:
if (et in str(typ)):
raise ValueError(dedent(f"""
Yikes - You specified `dtype['{col_name}']:'{typ}',
but aiqc does not support the following dtypes: {excluded_types}
"""))
"""
Now, we take the all of the resulting dataframe dtypes and save them.
Regardless of whether or not they were user-provided.
Convert the classed `dtype('float64')` to a string so we can use it in `.to_pandas()`
"""
dtype = {k: str(v) for k, v in actual_dtypes}
# Each object has the potential to be transformed so each object must be returned.
return dataframe, columns, shape, dtype
def df_to_compressed_parquet_bytes(dataframe:object):
"""
Parquet naturally preserves pandas/numpy dtypes.
fastparquet engine preserves timedelta dtype, alas it does not work with bytes!
https://towardsdatascience.com/stop-persisting-pandas-data-frames-in-csvs-f369a6440af5
"""
blob = io.BytesIO()
dataframe.to_parquet(
blob
, engine = 'pyarrow'
, compression = 'gzip'
, index = False
)
blob = blob.getvalue()
return blob
def path_to_df(
path:str
, source_file_format:str
, column_names:list
, skip_header_rows:int
):
"""
Previously, I was using pyarrow for all tabular/ sequence file formats.
However, it had worse support for missing column names and header skipping.
So I switched to pandas for handling csv/tsv, but read_parquet()
doesn't let you change column names easily, so using pyarrow for parquet.
"""
if not os.path.exists(path):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(path)`:\n{path}\n")
if not os.path.isfile(path):
raise ValueError(f"\nYikes - The path you provided is not a file according to `os.path.isfile(path)`:\n{path}\n")
if (source_file_format == 'tsv') or (source_file_format == 'csv'):
if (source_file_format == 'tsv') or (source_file_format is None):
sep='\t'
source_file_format = 'tsv' # Null condition.
elif (source_file_format == 'csv'):
sep=','
df = pd.read_csv(
filepath_or_buffer = path
, sep = sep
, names = column_names
, header = skip_header_rows
)
elif (source_file_format == 'parquet'):
if (skip_header_rows != 'infer'):
raise ValueError(dedent("""
Yikes - The argument `skip_header_rows` is not supported for `source_file_format='parquet'`
because Parquet stores column names as metadata.\n
"""))
tbl = pyarrow.parquet.read_table(path)
if (column_names is not None):
tbl = tbl.rename_columns(column_names)
# At this point, still need to work with metadata in df.
df = tbl.to_pandas()
return df
def arr_validate(ndarray):
if (ndarray.dtype.names is not None):
raise ValueError("\nYikes - Sorry, we don't support structured arrays.\n")
if (ndarray.size == 0):
raise ValueError("\nYikes - The ndarray you provided is empty: `ndarray.size == 0`.\n")
dimensions = len(ndarray.shape)
if (dimensions == 1) and (all(np.isnan(ndarray))):
raise ValueError("\nYikes - Your entire 1D array consists of `NaN` values.\n")
            elif (dimensions > 1) and (all(np.isnan(ndarray[0]))):
                # Sometimes when converting headered structures numpy will NaN them out.
                ndarray = np.delete(ndarray, 0, axis=0)
                print(dedent("""
                Warning - The entire first row of your array is 'NaN',
                which commonly happens in NumPy when headers are read into a numeric array,
                so we deleted this row during ingestion.
                """))
            # Return the (possibly trimmed) array so the deletion is visible to callers.
            return ndarray
class Image():
file_type = 'image'
def from_file(
path:str
, file_index:int
, dataset_id:int
, pillow_save:dict = {}
):
if not os.path.exists(path):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(path)`:\n{path}\n")
if not os.path.isfile(path):
raise ValueError(f"\nYikes - The path you provided is not a file according to `os.path.isfile(path)`:\n{path}\n")
path = os.path.abspath(path)
img = Imaje.open(path)
shape = {
'width': img.size[0]
, 'height':img.size[1]
}
blob = io.BytesIO()
img.save(blob, format=img.format, **pillow_save)
blob = blob.getvalue()
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Image.file_type
, file_format = img.format
, file_index = file_index
, shape = shape
, source_path = path
, dataset = dataset
)
try:
image = Image.create(
mode = img.mode
, file = file
, pillow_save = pillow_save
)
except:
file.delete_instance() # Orphaned.
raise
return file
def from_url(
url:str
, file_index:int
, dataset_id:int
, pillow_save:dict = {}
):
# URL format is validated in `from_urls`.
try:
img = Imaje.open(
requests.get(url, stream=True).raw
)
except:
raise ValueError(f"\nYikes - Could not open file at this url with Pillow library:\n{url}\n")
shape = {
'width': img.size[0]
, 'height':img.size[1]
}
blob = io.BytesIO()
img.save(blob, format=img.format, **pillow_save)
blob = blob.getvalue()
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Image.file_type
, file_format = img.format
, file_index = file_index
, shape = shape
, source_path = url
, dataset = dataset
)
try:
image = Image.create(
mode = img.mode
, file = file
, pillow_save = pillow_save
)
except:
file.delete_instance() # Orphaned.
raise
return file
def to_pillow(id:int):
#https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open
file = File.get_by_id(id)
if (file.file_type != 'image'):
raise ValueError(dedent(f"""
Yikes - Only `file.file_type='image' can be converted to Pillow images.
But you provided `file.file_type`: <{file.file_type}>
"""))
img_bytes = io.BytesIO(file.blob)
img = Imaje.open(img_bytes)
return img
class Tabular(BaseModel):
"""
- Do not change `dtype=PickleField()` because we are stringifying the columns.
I was tempted to do so for types like `np.float`, but we parse the final
type that Pandas decides to use.
"""
# Is sequence just a subset of tabular with a file_index?
columns = JSONField()
dtypes = JSONField()
file = ForeignKeyField(File, backref='tabulars')
class Image(BaseModel):
#https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
mode = CharField()
pillow_save = JSONField()
file = ForeignKeyField(File, backref='images')
class Label(BaseModel):
"""
- Label accepts multiple columns in case it is already OneHotEncoded (e.g. tensors).
- At this point, we assume that the Label is always a tabular dataset.
"""
columns = JSONField()
column_count = IntegerField()
unique_classes = JSONField(null=True) # For categoricals and binaries. None for continuous.
#probabilities = JSONField() #if you were to write back the result of unsupervised for semi-supervised learning.
dataset = ForeignKeyField(Dataset, backref='labels')
def from_dataset(dataset_id:int, columns:list):
d = Dataset.get_by_id(dataset_id)
columns = listify(columns)
if (d.dataset_type != 'tabular'):
raise ValueError(dedent(f"""
Yikes - Labels can only be created from `dataset_type='tabular'`.
But you provided `dataset_type`: <{d.dataset_type}>
"""))
d_cols = Dataset.Tabular.get_main_tabular(dataset_id).columns
# Check that the user-provided columns exist.
all_cols_found = all(col in d_cols for col in columns)
if not all_cols_found:
raise ValueError("\nYikes - You specified `columns` that do not exist in the Dataset.\n")
# Check for duplicates of this label that already exist.
        cols_alpha = sorted(columns)
d_labels = d.labels
count = d_labels.count()
if (count > 0):
for l in d_labels:
l_id = str(l.id)
l_cols = l.columns
l_cols_alpha = sorted(l_cols)
                if cols_alpha == l_cols_alpha:
raise ValueError(f"\nYikes - This Dataset already has Label <id:{l_id}> with the same columns.\nCannot create duplicate.\n")
column_count = len(columns)
label_df = Dataset.to_pandas(id=dataset_id, columns=columns)
"""
- When multiple columns are provided, they must be OHE.
- Figure out column count because classification_binary and associated
metrics can't be run on > 2 columns.
- Negative values do not alter type of numpy int64 and float64 arrays.
"""
if (column_count > 1):
unique_values = []
for c in columns:
uniques = label_df[c].unique()
unique_values.append(uniques)
if (len(uniques) == 1):
print(
f"Warning - There is only 1 unique value for this label column.\n" \
f"Unique value: <{uniques[0]}>\n" \
f"Label column: <{c}>\n"
)
flat_uniques = np.concatenate(unique_values).ravel()
all_uniques = np.unique(flat_uniques).tolist()
for i in all_uniques:
if (
((i == 0) or (i == 1))
or
((i == 0.) or (i == 1.))
):
pass
else:
raise ValueError(dedent(f"""
Yikes - When multiple columns are provided, they must be One Hot Encoded:
Unique values of your columns were neither (0,1) or (0.,1.) or (0.0,1.0).
The columns you provided contained these unique values: {all_uniques}
"""))
unique_classes = all_uniques
del label_df
# Now check if each row in the labels is truly OHE.
label_arr = Dataset.to_numpy(id=dataset_id, columns=columns)
for i, arr in enumerate(label_arr):
if 1 in arr:
arr = list(arr)
arr.remove(1)
if 1 in arr:
raise ValueError(dedent(f"""
Yikes - Label row <{i}> is supposed to be an OHE row,
but it contains multiple hot columns where value is 1.
"""))
else:
raise ValueError(dedent(f"""
Yikes - Label row <{i}> is supposed to be an OHE row,
but it contains no hot columns where value is 1.
"""))
elif (column_count == 1):
            # At this point, `label_df` is a single-column df that needs to be fetched as a Series.
col = columns[0]
label_series = label_df[col]
label_dtype = label_series.dtype
if (np.issubdtype(label_dtype, np.floating)):
unique_classes = None
else:
unique_classes = label_series.unique().tolist()
class_count = len(unique_classes)
if (
(np.issubdtype(label_dtype, np.signedinteger))
or
(np.issubdtype(label_dtype, np.unsignedinteger))
):
if (class_count >= 5):
print(
f"Tip - Detected `unique_classes >= {class_count}` for an integer Label." \
f"If this Label is not meant to be categorical, then we recommend you convert to a float-based dtype." \
f"Although you'll still be able to bin these integers when it comes time to make a Splitset."
)
if (class_count == 1):
print(
f"Tip - Only detected 1 unique label class. Should have 2 or more unique classes." \
f"Your Label's only class was: <{unique_classes[0]}>."
)
l = Label.create(
dataset = d
, columns = columns
, column_count = column_count
, unique_classes = unique_classes
)
return l
def to_pandas(id:int, samples:list=None):
samples = listify(samples)
l_frame = Label.get_label(id=id, numpy_or_pandas='pandas', samples=samples)
return l_frame
def to_numpy(id:int, samples:list=None):
samples = listify(samples)
l_arr = Label.get_label(id=id, numpy_or_pandas='numpy', samples=samples)
return l_arr
def get_label(id:int, numpy_or_pandas:str, samples:list=None):
samples = listify(samples)
l = Label.get_by_id(id)
l_cols = l.columns
dataset_id = l.dataset.id
if (numpy_or_pandas == 'numpy'):
lf = Dataset.to_numpy(
id = dataset_id
, columns = l_cols
, samples = samples
)
elif (numpy_or_pandas == 'pandas'):
lf = Dataset.to_pandas(
id = dataset_id
, columns = l_cols
, samples = samples
)
return lf
def get_dtypes(
id:int
):
l = Label.get_by_id(id)
dataset = l.dataset
l_cols = l.columns
tabular_dtype = Dataset.Tabular.get_main_tabular(dataset.id).dtypes
label_dtypes = {}
for key,value in tabular_dtype.items():
for col in l_cols:
if (col == key):
label_dtypes[col] = value
                    # Exit `col` loop early because matching `col` found.
break
return label_dtypes
class Featureset(BaseModel):
"""
- Remember, a Featureset is just a record of the columns being used.
- Decided not to go w subclasses of Unsupervised and Supervised because that would complicate the SDK for the user,
and it essentially forked every downstream model into two subclasses.
- PCA components vary across featuresets. When different columns are used those columns have different component values.
"""
columns = JSONField(null=True)
columns_excluded = JSONField(null=True)
dataset = ForeignKeyField(Dataset, backref='featuresets')
def from_dataset(
dataset_id:int
, include_columns:list=None
, exclude_columns:list=None
#Future: runPCA #,run_pca:boolean=False # triggers PCA analysis of all columns
):
"""
As we get further away from the `Dataset.<Types>` they need less isolation.
"""
d = Dataset.get_by_id(dataset_id)
include_columns = listify(include_columns)
exclude_columns = listify(exclude_columns)
if (d.dataset_type == 'image'):
# Just passes the Dataset through for now.
if (include_columns is not None) or (exclude_columns is not None):
raise ValueError("\nYikes - The `Dataset.Image` classes supports neither the `include_columns` nor `exclude_columns` arguemnt.\n")
columns = None
columns_excluded = None
elif (d.dataset_type == 'tabular'):
d_cols = Dataset.Tabular.get_main_tabular(dataset_id).columns
if (include_columns is not None) and (exclude_columns is not None):
raise ValueError("\nYikes - You can set either `include_columns` or `exclude_columns`, but not both.\n")
if (include_columns is not None):
# check columns exist
all_cols_found = all(col in d_cols for col in include_columns)
if not all_cols_found:
raise ValueError("\nYikes - You specified `include_columns` that do not exist in the Dataset.\n")
# inclusion
columns = include_columns
# exclusion
columns_excluded = d_cols
for col in include_columns:
columns_excluded.remove(col)
elif (exclude_columns is not None):
all_cols_found = all(col in d_cols for col in exclude_columns)
if not all_cols_found:
raise ValueError("\nYikes - You specified `exclude_columns` that do not exist in the Dataset.\n")
# exclusion
columns_excluded = exclude_columns
# inclusion
columns = d_cols
for col in exclude_columns:
columns.remove(col)
if not columns:
raise ValueError("\nYikes - You cannot exclude every column in the Dataset. For there will be nothing to analyze.\n")
else:
columns = d_cols
columns_excluded = None
"""
Check that this Dataset does not already have a Featureset that is exactly the same.
        There are fewer entries in `columns_excluded` so maybe it's faster to compare that.
"""
if columns_excluded is not None:
            cols_alpha = sorted(columns_excluded)
else:
            cols_alpha = None
d_featuresets = d.featuresets
count = d_featuresets.count()
if count > 0:
for f in d_featuresets:
f_id = str(f.id)
f_cols = f.columns_excluded
if f_cols is not None:
f_cols_alpha = sorted(f_cols)
else:
f_cols_alpha = None
                if cols_alpha == f_cols_alpha:
raise ValueError(dedent(f"""
Yikes - This Dataset already has Featureset <id:{f_id}> with the same columns.
Cannot create duplicate.
"""))
f = Featureset.create(
dataset = d
, columns = columns
, columns_excluded = columns_excluded
)
return f
def to_pandas(id:int, samples:list=None, columns:list=None):
samples = listify(samples)
columns = listify(columns)
f_frame = Featureset.get_featureset(
id = id
, numpy_or_pandas = 'pandas'
, samples = samples
, columns = columns
)
return f_frame
def to_numpy(id:int, samples:list=None, columns:list=None):
samples = listify(samples)
columns = listify(columns)
f_arr = Featureset.get_featureset(
id = id
, numpy_or_pandas = 'numpy'
, samples = samples
, columns = columns
)
return f_arr
def get_featureset(
id:int
, numpy_or_pandas:str
, samples:list = None
, columns:list = None
):
f = Featureset.get_by_id(id)
samples = listify(samples)
columns = listify(columns)
f_cols = f.columns
if (columns is not None):
for c in columns:
if c not in f_cols:
raise ValueError("\nYikes - Cannot fetch column '{c}' because it is not in `Featureset.columns`.\n")
f_cols = columns
dataset_id = f.dataset.id
if (numpy_or_pandas == 'numpy'):
ff = Dataset.to_numpy(
id = dataset_id
, columns = f_cols
, samples = samples
)
elif (numpy_or_pandas == 'pandas'):
ff = Dataset.to_pandas(
id = dataset_id
, columns = f_cols
, samples = samples
)
return ff
def get_dtypes(
id:int
):
f = Featureset.get_by_id(id)
dataset = f.dataset
if (dataset.dataset_type == 'image'):
raise ValueError("\nYikes - `featureset.dataset.dataset_type=='image'` does not have dtypes.\n")
f_cols = f.columns
tabular_dtype = Dataset.Tabular.get_main_tabular(dataset.id).dtypes
featureset_dtypes = {}
for key,value in tabular_dtype.items():
for col in f_cols:
if (col == key):
featureset_dtypes[col] = value
                    # Exit `col` loop early because matching `col` found.
break
return featureset_dtypes
def make_splitset(
id:int
, label_id:int = None
, size_test:float = None
, size_validation:float = None
, bin_count:int = None
):
s = Splitset.from_featureset(
featureset_id = id
, label_id = label_id
, size_test = size_test
, size_validation = size_validation
, bin_count = bin_count
)
return s
class Splitset(BaseModel):
"""
- Belongs to a Featureset, not a Dataset, because the samples selected vary based on the stratification of the features during the split,
and a Featureset already has a Dataset anyways.
- Here the `samples_` attributes contain indices.
-ToDo: store and visualize distributions of each column in training split, including label.
-Future: is it useful to specify the size of only test for unsupervised learning?
"""
samples = JSONField()
sizes = JSONField()
supervision = CharField()
has_test = BooleanField()
has_validation = BooleanField()
bin_count = IntegerField(null=True)
featureset = ForeignKeyField(Featureset, backref='splitsets')
label = ForeignKeyField(Label, deferrable='INITIALLY DEFERRED', null=True, backref='splitsets')
def from_featureset(
featureset_id:int
, label_id:int = None
, size_test:float = None
, size_validation:float = None
, bin_count:float = None
):
if (size_test is not None):
if (size_test <= 0.0) or (size_test >= 1.0):
raise ValueError("\nYikes - `size_test` must be between 0.0 and 1.0\n")
# Don't handle `has_test` here. Need to check label first.
if (size_validation is not None) and (size_test is None):
raise ValueError("\nYikes - you specified a `size_validation` without setting a `size_test`.\n")
if (size_validation is not None):
if (size_validation <= 0.0) or (size_validation >= 1.0):
raise ValueError("\nYikes - `size_test` must be between 0.0 and 1.0\n")
sum_test_val = size_validation + size_test
if sum_test_val >= 1.0:
raise ValueError("\nYikes - Sum of `size_test` + `size_test` must be between 0.0 and 1.0 to leave room for training set.\n")
"""
        Have to run train_test_split twice, so do the math to figure out the size of the 2nd split.
Let's say I want {train:0.67, validation:0.13, test:0.20}
The first test_size is 20% which leaves 80% of the original data to be split into validation and training data.
(1.0/(1.0-0.20))*0.13 = 0.1625
"""
pct_for_2nd_split = (1.0/(1.0-size_test))*size_validation
has_validation = True
else:
has_validation = False
f = Featureset.get_by_id(featureset_id)
f_cols = f.columns
# Feature data to be split.
d = f.dataset
arr_f = Dataset.to_numpy(id=d.id, columns=f_cols)
"""
Simulate an index to be split alongside features and labels
in order to keep track of the samples being used in the resulting splits.
"""
row_count = arr_f.shape[0]
        arr_idx = np.arange(row_count)
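        # Hedged sketch of the typical continuation (assumed, not from the source):
        # the simulated index rides through `train_test_split` alongside the
        # features and labels so split membership can be recorded by sample index.
        #
        #   features_train, features_test, idx_train, idx_test = train_test_split(
        #       arr_f, arr_idx, test_size=size_test
        #   )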
"""
Plot 2D timeseries data heatmaps such as secondary structure and per residue RMSD.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from matplotlib.colors import ListedColormap
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import matplotlib.patches
from numpy.lib import genfromtxt
#plt.rcParams['figure.figsize']= (12,6)
plt.rcParams.update({'font.size': 14})
plt.rcParams["font.family"]="Sans-serif"
plt.rcParams['font.sans-serif'] = 'Arial'
plt.rcParams['mathtext.default'] = 'regular'
plt.rcParams['axes.linewidth'] = 2.25
plt.rcParams['xtick.major.size'] = 6
plt.rcParams['xtick.major.width'] = 2.5
plt.rcParams['xtick.minor.size'] = 2
plt.rcParams['xtick.minor.width'] = 2
plt.rcParams['ytick.major.size'] = 6
plt.rcParams['ytick.major.width'] = 2.5
class Per_Res_Plot:
def __init__(self, file, timescale=10**6, data_interval=1, ax=None):
"""
Parameters
----------
file : str or list
Path to the cpptraj data file.
timescale : int
Convert from frame to timescale. Default 1ps per frame to us scale.
data_interval : int
Optionally process data in larger intervals, default 1.
"""
self.file = file
self.timescale = timescale
self.data_interval = data_interval
        # TODO: there has to be a best practice for this
if ax is None:
self.fig, self.ax = plt.subplots(figsize=(12,5))
else:
            self.fig = plt.gcf()  # `gca()` returns an Axes; the current Figure comes from `gcf()`.
self.ax = ax
def add_patch(self, ax, recx, recy, facecolor, text, recwidth=0.04, recheight=0.05, recspace=0):
ax = self.ax
ax.add_patch(matplotlib.patches.Rectangle((recx, recy),
recwidth, recheight,
facecolor=facecolor,
edgecolor='black',
clip_on=False,
transform=ax.transAxes,
lw=2.25)
)
ax.text(recx + recheight + recspace, recy + recheight / 2.8, text, ha='left', va='center',
transform=ax.transAxes, fontsize=12)
def process_per_res_data(self):
"""
Process the cpptraj data that is in per-residue format, e.g. for DSSP and RMSD.
Head of file is each residue, e.g. "1:MET", first column of file is the frame.
Returns
-------
x : ndarray
1D array of the timepoints.
y : ndarray
1D array of the residue string and numbers.
z : ndarray
2D array of the x by y data values.
"""
if type(self.file) == list:
header = np.genfromtxt(self.file[0], dtype=str, max_rows=1, comments=None)
# build array from total frame counts and residue number
frames = [len(np.genfromtxt(f, usecols=0)) for f in self.file]
self.x = np.divide(np.arange(0, sum(frames), 1), self.timescale)
data = np.zeros(shape=(sum(frames), len(header)))
# fill out data array for each dataset in file list
frame_index = 0
for num, val in enumerate(frames):
data[frame_index:val + frame_index, :] = np.genfromtxt(self.file[num])
frame_index += val
else:
header = np.genfromtxt(self.file, dtype=str, max_rows=1, comments=None)
data = np.genfromtxt(self.file)[::self.data_interval,:]
            # Keep x aligned with the interval-sliced data above.
            self.x = np.divide(np.genfromtxt(self.file, usecols=0)[::self.data_interval], self.timescale)
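        # Hedged sketch of the remainder (inferred from the docstring's declared
        # returns; not in the original snippet): split residue labels and the
        # value grid out of the parsed data.
        self.y = header[1:]        # residue labels, e.g. "1:MET"
        self.z = data[:, 1:].T     # residues (rows) by timepoints (cols)
        return self.x, self.y, self.z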
import glob
import random
import time
import os
import os.path as osp
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.ops import nms
#import maskrcnn_benchmark.layers.nms as nms
# Set printoptions
torch.set_printoptions(linewidth=1320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
def mkdir_if_missing(d):
if not osp.exists(d):
os.makedirs(d)
def float3(x): # format floats to 3 decimals
return float(format(x, '.3f'))
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_classes(path):
"""
Loads class labels at 'path'
"""
fp = open(path, 'r')
names = fp.read().split('\n')
return list(filter(None, names)) # filter removes empty strings (such as last line)
def model_info(model): # Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
print('\n%5s %50s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %50s %9s %12g %20s %12.3g %12.3g' % (
i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
print('Model Summary: %g layers, %g parameters, %g gradients\n' % (i + 1, n_p, n_g))
def plot_one_box(x, img, color=None, label=None, line_thickness=None): # Plots one bounding box on image img
tl = line_thickness or round(0.0004 * max(img.shape[0:2])) + 1 # line thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.03)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.03)
torch.nn.init.constant_(m.bias.data, 0.0)
def xyxy2xywh(x):
# Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)  # dtype check broke for non-float32 tensors
y[:, 0] = (x[:, 0] + x[:, 2]) / 2
y[:, 1] = (x[:, 1] + x[:, 3]) / 2
y[:, 2] = x[:, 2] - x[:, 0]
y[:, 3] = x[:, 3] - x[:, 1]
return y
def xywh2xyxy(x):
# Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)  # dtype check broke for non-float32 tensors
y[:, 0] = (x[:, 0] - x[:, 2] / 2)
y[:, 1] = (x[:, 1] - x[:, 3] / 2)
y[:, 2] = (x[:, 0] + x[:, 2] / 2)
y[:, 3] = (x[:, 1] + x[:, 3] / 2)
return y
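# Worked example of the two conversions above:
#   xyxy2xywh on [0, 0, 10, 20] -> [5, 10, 10, 20]  (center-x, center-y, width, height)
#   xywh2xyxy on [5, 10, 10, 20] -> [0, 0, 10, 20]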
def scale_coords(img_size, coords, img0_shape):
# Rescale x1, y1, x2, y2 from 416 to image size
gain_w = float(img_size[0]) / img0_shape[1] # gain = old / new
gain_h = float(img_size[1]) / img0_shape[0]
gain = min(gain_w, gain_h)
pad_x = (img_size[0] - img0_shape[1] * gain) / 2 # width padding
pad_y = (img_size[1] - img0_shape[0] * gain) / 2 # height padding
coords[:, [0, 2]] -= pad_x
coords[:, [1, 3]] -= pad_y
coords[:, 0:4] /= gain
coords[:, :4] = torch.clamp(coords[:, :4], min=0)
return coords
def ap_per_class(tp, conf, pred_cls, target_cls):
""" Compute the average precision, given the recall and precision curves.
Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (list).
conf: Objectness value from 0-1 (list).
pred_cls: Predicted object classes (list).
target_cls: True object classes (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# lists/pytorch to numpy
tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(pred_cls), np.array(target_cls)
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
# Create Precision-Recall curve and compute AP for each class
ap, p, r = [], [], []
for c in unique_classes:
i = pred_cls == c
n_gt = sum(target_cls == c) # Number of ground truth objects
n_p = sum(i) # Number of predicted objects
if (n_p == 0) and (n_gt == 0):
continue
elif (n_p == 0) or (n_gt == 0):
ap.append(0)
r.append(0)
p.append(0)
else:
# Accumulate FPs and TPs
fpc = np.cumsum(1 - tp[i])
tpc = np.cumsum(tp[i])
# Recall
recall_curve = tpc / (n_gt + 1e-16)
r.append(tpc[-1] / (n_gt + 1e-16))
# Precision
precision_curve = tpc / (tpc + fpc)
p.append(tpc[-1] / (tpc[-1] + fpc[-1]))
# AP from recall-precision curve
ap.append(compute_ap(recall_curve, precision_curve))
return np.array(ap), unique_classes.astype('int32'), np.array(r), np.array(p)
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
    mrec = np.concatenate(([0.], recall, [1.]))
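    # NOTE: the source dump is truncated here; the canonical py-faster-rcnn
    # continuation referenced in the docstring is reproduced below as a sketch.
    mpre = np.concatenate(([0.], precision, [0.]))
    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # to calculate area under the PR curve, look for points
    # where the recall value changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    # and sum (delta recall) * precision
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap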
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 2021-05-14 19:42:00
# @Last Modified by: <NAME>
# @Last Modified time: 2021-05-20 10:09:19
import numpy as np
import matplotlib.pyplot as plt
from ..core import NeuronalBilayerSonophore, PulsedProtocol, Batch
from ..utils import si_format
from ..neurons import passiveNeuron
from ..postpro import gamma
from ..plt import harmonizeAxesLimits, hideSpines, hideTicks, addXscale, addYscale
from .coupled_nbls import CoupledSonophores
class Benchmark:
def __init__(self, a, nnodes, outdir=None):
self.a = a
self.nnodes = nnodes
self.outdir = outdir
def runSims(self, model, drives, tstim, covs):
''' Run full and sonic simulations for a specific combination drives,
pulsed protocol and coverage fractions, harmonize outputs and compute
normalized charge density profiles.
'''
Fdrive = drives[0].f
assert all(x.f == Fdrive for x in drives), 'frequencies do not match'
assert len(covs) == model.nnodes, 'coverages do not match model dimensions'
assert len(drives) == model.nnodes, 'drives do not match model dimensions'
# If not provided, compute stimulus duration from model passive properties
min_ncycles = 10
ntaumax_conv = 5
if tstim is None:
tstim = max(ntaumax_conv * model.taumax, min_ncycles / Fdrive)
# Recast stimulus duration as finite multiple of acoustic period
tstim = int(np.ceil(tstim * Fdrive)) / Fdrive # s
# Pulsed protocol
pp = PulsedProtocol(tstim, 0)
# Simulate/Load with full and sonic methods
data, meta = {}, {}
for method in ['full', 'sonic']:
data[method], meta[method] = model.simAndSave(
drives, pp, covs, method, outdir=self.outdir,
overwrite=False, minimize_output=True)
# Cycle-average full solution and interpolate sonic solution along same time vector
data['cycleavg'] = data['full'].cycleAveraged(1 / Fdrive)
data['sonic'] = data['sonic'].interpolate(data['cycleavg'].time)
# Compute normalized charge density profiles and add them to dataset
for simkey, simdata in data.items():
for nodekey, nodedata in simdata.items():
nodedata['Qnorm'] = nodedata['Qm'] / model.refpneuron.Cm0 * 1e3 # mV
# Return dataset
return data, meta
def computeGamma(self, data, *args):
''' Perform per-node gamma evaluation on charge density profiles. '''
gamma_dict = {}
for k in data['cycleavg'].keys():
Qnorms = [data[simkey][k]['Qnorm'].values for simkey in ['cycleavg', 'sonic']]
gamma_dict[k] = gamma(*Qnorms, *args)
# Discard 1st and last indexes of evaluation
        gamma_dict[k] = np.hstack(([np.nan], gamma_dict[k][1:-1], [np.nan]))
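        # NOTE: truncated in the source dump; returning the per-node dict is
        # the assumed final step.
        return gamma_dict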
"""traverse all the datastore vectors, delete the ones that
are never hit (excluding itself)
"""
import argparse
import numpy as np
import faiss
import ctypes
import time
import pickle
from collections import defaultdict
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dstore-prefix', type=str,
                    help='path prefix of the datastore key/value files')
parser.add_argument('--dstore-size', type=int,
                    help='number of entries in the datastore')
parser.add_argument('--actual-dstore-size', type=int,
                    default=None, help='actual number of datastore entries, if different from --dstore-size')
parser.add_argument('--index', type=str,
help='the faiss index file')
parser.add_argument('--dstore-fp16', default=False,
action='store_true')
parser.add_argument('--nprobe', type=int, default=32)
parser.add_argument('--dimension', type=int, default=1024)
parser.add_argument('--k', type=int, default=1024,
help='the number of nearest neighbors')
parser.add_argument('--save', type=str,
                    help='output path prefix for the hit statistics')
# for the purpose of parallel computation
parser.add_argument('--start-point', type=int, default=0,
help='the starting point to traverse the datastore')
parser.add_argument('--num', type=int, default=int(1e11),
                    help='number of points to traverse')
args = parser.parse_args()
if args.actual_dstore_size is None:
args.actual_dstore_size = args.dstore_size
print(args)
print(f'shape ({args.dstore_size}, {args.dimension})')
if args.dstore_fp16:
keys = np.memmap(args.dstore_prefix + '_keys.npy', dtype=np.float16, mode='r', shape=(args.dstore_size, args.dimension))
vals = np.memmap(args.dstore_prefix + '_vals.npy', dtype=np.int, mode='r', shape=(args.dstore_size, 1))
else:
keys = np.memmap(args.dstore_prefix + '_keys.npy', dtype=np.float32, mode='r', shape=(args.dstore_size, args.dimension))
    vals = np.memmap(args.dstore_prefix + '_vals.npy', dtype=np.int, mode='r', shape=(args.dstore_size, 1))
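# NOTE: the source dump is truncated below this point. The following is a
# sketch of the traversal implied by the header docstring and the argparse
# flags above; variable names and the exact output format are assumptions.
index = faiss.read_index(args.index)
if hasattr(index, 'nprobe'):
    index.nprobe = args.nprobe

hit_counts = np.zeros(args.dstore_size, dtype=np.int64)
start = args.start_point
end = int(min(args.start_point + args.num, args.dstore_size))
batch_size = 1024
t0 = time.time()
for b in range(start, end, batch_size):
    # faiss expects float32 queries, so upcast fp16 keys if necessary
    queries = np.array(keys[b:min(b + batch_size, end)], dtype=np.float32)
    _, knns = index.search(queries, args.k)
    for qid, row in enumerate(knns, start=b):
        for nid in row:
            if 0 <= nid != qid:  # count hits, excluding self-retrieval
                hit_counts[nid] += 1
print(f'traversed [{start}, {end}) in {time.time() - t0:.1f}s')
with open(f'{args.save}_hits_{start}.pkl', 'wb') as f:
    pickle.dump(hit_counts, f)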
import os
import os.path as osp
import gym
import time
import joblib
import logging
import numpy as np
import tensorflow as tf
from baselines import logger
from collections import deque
from baselines.common import set_global_seeds, explained_variance
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.atari_wrappers import wrap_deepmind
from baselines.common import tf_util
from utils import discount_with_dones, discount_moments_with_dones
from utils import Scheduler, make_path, find_trainable_variables
from utils import cat_entropy, mse
class Model(object):
def __init__(self, policy, ob_space, ac_space, nenvs, nsteps,
ent_coef=0.01, vf_coef=0.5, mf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
sess = tf_util.make_session()
nact = ac_space.n
nbatch = nenvs*nsteps
A = tf.placeholder(tf.int32, [nbatch])
ADV = tf.placeholder(tf.float32, [nbatch])
ADV_MOMENT = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
R2 = tf.placeholder(tf.float32, [nbatch])
LR = tf.placeholder(tf.float32, [])
ENT_COEF = tf.placeholder(tf.float32, [])
step_model = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)
train_model = policy(sess, ob_space, ac_space, nenvs*nsteps, nsteps, reuse=True)
neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=A)
pg_loss = tf.reduce_mean((ADV) * neglogpac)
vf_loss = tf.reduce_mean(mse(tf.squeeze(train_model.vf), R))
mf_loss = tf.reduce_mean(mse(tf.squeeze(train_model.mf), R2))
entropy = tf.reduce_mean(cat_entropy(train_model.pi))
ent_coef = Scheduler(v=ent_coef, nvalues=total_timesteps/10, schedule='step')
mf_coef = 0.01
loss = pg_loss - entropy*ENT_COEF + vf_loss * vf_coef + mf_loss * mf_coef
# loss = pg_loss + vf_loss * vf_coef + mf_loss * mf_coef
# loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
params = find_trainable_variables("model")
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, rewards_square, masks, actions, values, moments):
values_random = np.random.normal(loc=values,scale=np.sqrt(np.maximum(moments - values ** 2,0)))
# values_random = values - np.sqrt(np.maximum(moments - values ** 2,0))
advs = rewards - values_random
# advs = (1 - 2 * rewards) * rewards - values + 2 * values * values
advs_moment = rewards_square - moments
# advs = (1 + 2 * rewards) * (rewards)
# advs_moment = rewards_square
for step in range(len(obs)):
cur_lr = lr.value()
cur_ent_coef = ent_coef.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, ADV_MOMENT: advs_moment, R:rewards, R2:rewards_square, LR:cur_lr, ENT_COEF:cur_ent_coef}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, moment_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, mf_loss, entropy, _train],
td_map
)
return policy_loss, value_loss, moment_loss, policy_entropy
def save(save_path):
ps = sess.run(params)
make_path(osp.dirname(save_path))
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
ps = sess.run(restores)
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = save
self.load = load
tf.global_variables_initializer().run(session=sess)
class Runner(object):
def __init__(self, env, model, nsteps=5, gamma=0.99):
self.env = env
self.model = model
nh, nw, nc = env.observation_space.shape
nenv = env.num_envs
self.batch_ob_shape = (nenv*nsteps, nh, nw, nc)
self.obs = np.zeros((nenv, nh, nw, nc), dtype=np.uint8)
self.nc = nc
obs = env.reset()
self.gamma = gamma
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
self.counters = np.zeros(nenv)
self.counters_fixed = []
def run(self):
mb_obs, mb_rewards, mb_rewards_square, mb_actions, mb_values, mb_moments, mb_dones = [],[],[],[],[],[],[]
mb_states = self.states
epinfos = []
for n in range(self.nsteps):
actions, values, moments, states, _ = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_moments.append(moments)
mb_dones.append(self.dones)
obs, rewards, dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
self.states = states
self.dones = dones
for n, done in enumerate(dones):
if done:
self.obs[n] = self.obs[n]*0
self.counters_fixed.append(self.counters[n])
self.counters[n] = 0
else:
self.counters[n] += rewards[n]
self.obs = obs
rewards = np.sign(rewards)
mb_rewards.append(rewards)
            mb_rewards_square.append(rewards)  # FIXME (flagged in the source): appends the signed reward, not its square; with sign-clipped rewards, rewards ** 2 == np.abs(rewards)
mb_dones.append(self.dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
        mb_rewards_square = np.asarray(mb_rewards_square, dtype=np.float32).swapaxes(1, 0)
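        # NOTE: the source dump is truncated below. A sketch of how the
        # reference baselines A2C runner finishes the rollout follows; the
        # exact treatment of the moment buffers here is an assumption.
        mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
        mb_moments = np.asarray(mb_moments, dtype=np.float32).swapaxes(1, 0)
        mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
        mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]
        last_values = self.model.value(self.obs, self.states, self.dones).tolist()
        # discount/bootstrap off the value function
        for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
            rewards = rewards.tolist()
            dones = dones.tolist()
            if dones[-1] == 0:
                rewards = discount_with_dones(rewards + [value], dones + [1], self.gamma)[:-1]
            else:
                rewards = discount_with_dones(rewards, dones, self.gamma)
            mb_rewards[n] = rewards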
import unittest
import numpy as np
import vigra
from lazyflow.graph import Graph
from tsdl.targets import OpExponentiallySegmentedPattern
from tsdl.targets import OpDiscretize
from tsdl.targets import OpClassFromOneHot
from tsdl.targets import OpHostloadTarget
class TestOpExponentiallySegmentedpattern(unittest.TestCase):
def setUp(self):
self.baseline_size = 2
self.num_segments = 2
x = np.asarray([5, 7, 3, 4, 10, 2, 3])
x = vigra.taggedView(x, axistags='t')
self.data = x
def testSimple(self):
x = self.data
d = {"class": OpExponentiallySegmentedPattern,
"num_segments": self.num_segments,
"baseline_size": self.baseline_size}
op = OpExponentiallySegmentedPattern.build(d, graph=Graph())
op.NumSegments.setValue(self.num_segments)
op.BaselineSize.setValue(self.baseline_size)
op.Input.setValue(x)
exp = np.asarray([[6, 5, 3.5, 7, 6, 2.5, 1.5],
[4.75, 6, 4.75, 4.75, 3.75, 1.25, .75]]).T
y = op.Output[...].wait()
np.testing.assert_array_equal(y.shape, (7, 2))
np.testing.assert_array_almost_equal(y.T, exp.T)
y = op.Output[1:4, ...].wait()
np.testing.assert_array_equal(y.shape, (3, 2))
np.testing.assert_array_almost_equal(y.T, exp[1:4, :].T)
valid = op.Valid[...].wait()
np.testing.assert_array_equal(valid, [1, 1, 1, 1, 0, 0, 0])
valid = op.Valid[:5].wait()
np.testing.assert_array_equal(valid, [1, 1, 1, 1, 0])
valid = op.Valid[3:5].wait()
np.testing.assert_array_equal(valid, [1, 0])
class TestOpDiscretize(unittest.TestCase):
def setUp(self):
pass
def testSimple(self):
x = np.asarray([.15, .25, .68, .83, .01])
x = vigra.taggedView(x, axistags='t')
g = Graph()
op = OpDiscretize(graph=g)
op.Input.setValue(x)
op.NumLevels.setValue(5)
exp = [[True, False, False, False, False],
[False, True, False, False, False],
[False, False, False, True, False],
[False, False, False, False, True],
[True, False, False, False, False]]
exp = np.asarray(exp, dtype=np.bool).astype(np.float)
out = op.Output[...].wait()
np.testing.assert_array_equal(out, exp)
class TestOpClassFromOneHot(unittest.TestCase):
def setUp(self):
pass
def testSimple(self):
x = [[True, False, False, False, False],
[False, True, False, False, False],
[False, False, False, True, False],
[False, False, False, False, True],
[True, False, False, False, False]]
x = np.asarray(x, dtype=np.bool).astype(np.float)
g = Graph()
op = OpClassFromOneHot(graph=g)
op.Input.setValue(x)
out = op.Output[...].wait()
        exp = np.asarray([0, 1, 3, 4, 0], dtype=np.int)
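        # NOTE: truncated in the source dump; the parallel tests above suggest
        # the missing assertion is:
        np.testing.assert_array_equal(out, exp)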
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import os
import unittest
import cv2
import numpy as np
from click.testing import CliRunner
from mitoviz.cli import mitoviz_plot as cli
from mitoviz.tests.constants import (
SAMPLE_VCF, SAMPLE_HF_VCF, SAMPLE_MULTI_VCF, SAMPLE_HF_CSV, SAMPLE_HF_TSV,
SAMPLE_HF_TSV_COMM,
BASE_IMG, BASE_IMG_LABELS, BASE_IMG_LEGEND, BASE_IMG_SPLIT,
BASE_IMG_LINEAR, BASE_IMG_LINEAR_LABELS, BASE_IMG_LINEAR_LEGEND,
BASE_IMG_LINEAR_SPLIT, BASE_IMG_LABELS_HF, BASE_IMG_LINEAR_LABELS_HF,
BASE_IMG_PLOTLY, BASE_IMG_PLOTLY_LABELS_HF, BASE_IMG_PLOTLY_LEGEND,
BASE_IMG_PLOTLY_SPLIT,
BASE_IMG_PLOTLY_LINEAR, BASE_IMG_PLOTLY_LINEAR_LABELS_HF,
BASE_IMG_PLOTLY_LINEAR_LEGEND, BASE_IMG_PLOTLY_LINEAR_SPLIT,
BASE_HF_IMG, BASE_HF_IMG_LABELS, BASE_HF_IMG_LEGEND, BASE_HF_IMG_SPLIT,
BASE_HF_IMG_LINEAR, BASE_HF_IMG_LINEAR_LABELS, BASE_HF_IMG_LINEAR_LEGEND,
BASE_HF_IMG_LINEAR_SPLIT,
BASE_HF_IMG_PLOTLY, BASE_HF_IMG_PLOTLY_LEGEND, BASE_HF_IMG_PLOTLY_SPLIT,
BASE_HF_IMG_PLOTLY_LINEAR, BASE_HF_IMG_PLOTLY_LINEAR_LEGEND,
BASE_HF_IMG_PLOTLY_LINEAR_SPLIT,
BASE_HF_IMG_DF, BASE_HF_IMG_LABELS_DF, BASE_HF_IMG_LEGEND_DF,
BASE_HF_IMG_SPLIT_DF,
BASE_HF_IMG_LINEAR_DF, BASE_HF_IMG_LINEAR_LABELS_DF,
BASE_HF_IMG_LINEAR_LEGEND_DF, BASE_HF_IMG_LINEAR_SPLIT_DF,
BASE_HF_IMG_PLOTLY_DF, BASE_HF_IMG_PLOTLY_LEGEND_DF,
BASE_HF_IMG_PLOTLY_SPLIT_DF,
BASE_MULTI_IMG, BASE_MULTI_IMG_LABELS, BASE_MULTI_IMG_LEGEND,
BASE_MULTI_IMG_SPLIT,
BASE_MULTI_IMG_LINEAR, BASE_MULTI_IMG_LINEAR_LABELS,
BASE_MULTI_IMG_LINEAR_LEGEND, BASE_MULTI_IMG_LINEAR_SPLIT,
BASE_MULTI_IMG_PLOTLY, BASE_MULTI_IMG_PLOTLY_LEGEND,
BASE_MULTI_IMG_PLOTLY_SPLIT,
BASE_MULTI_IMG_PLOTLY_LINEAR, BASE_MULTI_IMG_PLOTLY_LINEAR_LEGEND,
BASE_MULTI_IMG_PLOTLY_LINEAR_SPLIT,
OUTPUT_IMG, OUTPUT_HF_IMG, OUTPUT_MULTI_IMG,
OUTPUT_HTML, OUTPUT_HF_HTML, OUTPUT_MULTI_HTML
)
class TestCliVcf(unittest.TestCase):
def setUp(self) -> None:
self.runner = CliRunner()
def test_cli_plot_help(self):
# Given/When
result = self.runner.invoke(cli.main, ["--help"])
# Then
self.assertEqual(0, result.exit_code)
self.assertIn("Show this message and exit.", result.output)
def test_cli_plot_polar(self):
# Given
base_img = cv2.imread(BASE_IMG)
# When
result = self.runner.invoke(cli.main, [SAMPLE_VCF])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_linear(self):
# Given
base_img = cv2.imread(BASE_IMG_LINEAR)
# When
result = self.runner.invoke(cli.main, [SAMPLE_VCF, "--linear"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_polar_plotly(self):
# Given
base_img = BASE_IMG_PLOTLY
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main, [SAMPLE_VCF, "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly(self):
# Given
base_img = BASE_IMG_PLOTLY_LINEAR
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main, [SAMPLE_VCF,
"--linear", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_labels(self):
# Given
base_img = cv2.imread(BASE_IMG_LABELS)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--labels"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_polar_labels_hf(self):
# Given
base_img = cv2.imread(BASE_IMG_LABELS_HF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--labels", "--labels-hf"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_linear_labels(self):
# Given
base_img = cv2.imread(BASE_IMG_LINEAR_LABELS)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--labels", "--linear"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_linear_labels_hf(self):
# Given
base_img = cv2.imread(BASE_IMG_LINEAR_LABELS_HF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--labels", "--labels-hf",
"--linear"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_polar_legend(self):
# Given
base_img = cv2.imread(BASE_IMG_LEGEND)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--legend"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_linear_legend(self):
# Given
base_img = cv2.imread(BASE_IMG_LINEAR_LEGEND)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--legend", "--linear"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_polar_plotly_labels_hf(self):
# Given
base_img = BASE_IMG_PLOTLY_LABELS_HF
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--labels-hf",
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_plotly_legend(self):
# Given
base_img = BASE_IMG_PLOTLY_LEGEND
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--legend", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_labels_hf(self):
# Given
base_img = BASE_IMG_PLOTLY_LINEAR_LABELS_HF
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--labels-hf", "--linear",
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_legend(self):
# Given
base_img = BASE_IMG_PLOTLY_LINEAR_LEGEND
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF,
"--legend", "--linear", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_output(self):
# Given
base_img = cv2.imread(BASE_IMG)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF,
"--output", OUTPUT_IMG])
result_img = cv2.imread(OUTPUT_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_IMG)
def test_cli_plotly_output(self):
# Given
base_img = BASE_IMG_PLOTLY
test_img = OUTPUT_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--interactive",
"--output", test_img])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_split(self):
# Given
base_img = cv2.imread(BASE_IMG_SPLIT)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--split"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_linear_split(self):
# Given
base_img = cv2.imread(BASE_IMG_LINEAR_SPLIT)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--split", "--linear"])
result_img = cv2.imread("MITOVIZ001.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("MITOVIZ001.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("MITOVIZ001.png")
def test_cli_plot_polar_plotly_split(self):
# Given
base_img = BASE_IMG_PLOTLY_SPLIT
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF, "--split", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_split(self):
# Given
base_img = BASE_IMG_PLOTLY_LINEAR_SPLIT
test_img = "MITOVIZ001.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_VCF,
"--split", "--linear", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_polar(self):
# Given
base_img = cv2.imread(BASE_HF_IMG)
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_VCF])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_linear(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR)
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_VCF, "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_polar_plotly(self):
# Given
base_img = BASE_HF_IMG_PLOTLY
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_VCF, "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_linear_plotly(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_LINEAR
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_VCF,
"--linear", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_polar_labels(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LABELS)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--labels"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_linear_labels(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR_LABELS)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--labels", "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_polar_legend(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LEGEND)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--legend"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_linear_legend(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR_LEGEND)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--legend", "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_polar_plotly_legend(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_LEGEND
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--legend",
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_linear_plotly_legend(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_LINEAR_LEGEND
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF,
"--linear", "--legend", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_output(self):
# Given
base_img = cv2.imread(BASE_HF_IMG)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF,
"--output", OUTPUT_HF_IMG])
result_img = cv2.imread(OUTPUT_HF_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_HF_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_HF_IMG)
def test_cli_plotly_hf_output(self):
# Given
base_img = BASE_HF_IMG_PLOTLY
test_img = OUTPUT_HF_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--interactive",
"--output", test_img])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_polar_split(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_SPLIT)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--split"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_linear_split(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR_SPLIT)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--split", "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_hf_polar_plotly_split(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_SPLIT
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF, "--split",
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_hf_linear_plotly_split(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_LINEAR_SPLIT
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_VCF,
"--linear", "--split", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_sample_multi(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_linear_sample_multi(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_LINEAR)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--linear"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_polar_plotly_sample_multi(self):
# Given
base_img = BASE_MULTI_IMG_PLOTLY
test_img = OUTPUT_MULTI_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", test_img,
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_sample_multi(self):
# Given
base_img = BASE_MULTI_IMG_PLOTLY_LINEAR
test_img = OUTPUT_MULTI_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF, "--linear",
"--sample", "SRR1777294",
"--output", test_img,
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_sample_multi_labels(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_LABELS)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--labels"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_linear_sample_multi_labels(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_LINEAR_LABELS)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--labels", "--linear"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_polar_sample_multi_legend(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_LEGEND)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--legend"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_linear_sample_multi_legend(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_LINEAR_LEGEND)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--legend", "--linear"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_polar_plotly_sample_multi_legend(self):
# Given
base_img = BASE_MULTI_IMG_PLOTLY_LEGEND
test_img = OUTPUT_MULTI_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", test_img,
"--legend", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_sample_multi_legend(self):
# Given
base_img = BASE_MULTI_IMG_PLOTLY_LINEAR_LEGEND
test_img = OUTPUT_MULTI_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF, "--linear",
"--sample", "SRR1777294",
"--output", test_img,
"--legend", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_sample_multi_split(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_SPLIT)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--split"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_linear_sample_multi_split(self):
# Given
base_img = cv2.imread(BASE_MULTI_IMG_LINEAR_SPLIT)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", OUTPUT_MULTI_IMG,
"--split", "--linear"])
result_img = cv2.imread(OUTPUT_MULTI_IMG)
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(OUTPUT_MULTI_IMG))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove(OUTPUT_MULTI_IMG)
def test_cli_plot_polar_plotly_sample_multi_split(self):
# Given
base_img = BASE_MULTI_IMG_PLOTLY_SPLIT
test_img = OUTPUT_MULTI_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF,
"--sample", "SRR1777294",
"--output", test_img,
"--split", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_sample_multi_split(self):
# Given
base_img = BASE_MULTI_IMG_PLOTLY_LINEAR_SPLIT
test_img = OUTPUT_MULTI_HTML
# When
result = self.runner.invoke(cli.main,
[SAMPLE_MULTI_VCF, "--linear",
"--sample", "SRR1777294",
"--output", test_img,
"--split", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
class TestCliCsv(unittest.TestCase):
def setUp(self) -> None:
self.runner = CliRunner()
def test_cli_plot_polar_csv(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_DF)
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_CSV])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_linear_csv(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR_DF)
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_CSV, "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_polar_plotly_csv(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_DF
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_CSV, "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_csv(self):
# Given
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main, [SAMPLE_HF_CSV,
"--linear", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_csv_labels(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LABELS_DF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV, "--labels"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_linear_csv_labels(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR_LABELS_DF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV, "--labels", "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_polar_csv_legend(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LEGEND_DF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV, "--legend"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_linear_csv_legend(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_LINEAR_LEGEND_DF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV, "--legend", "--linear"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
self.assertFalse(np.any(diff))
# Cleanup
os.remove("HG00420.png")
def test_cli_plot_polar_plotly_csv_legend(self):
# Given
base_img = BASE_HF_IMG_PLOTLY_LEGEND_DF
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV, "--legend",
"--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
self.assertEqual(os.path.getsize(base_img), os.path.getsize(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_linear_plotly_csv_legend(self):
# Given
test_img = "HG00420.html"
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV,
"--linear", "--legend", "--interactive"])
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile(test_img))
# Cleanup
os.remove(test_img)
def test_cli_plot_polar_csv_split(self):
# Given
base_img = cv2.imread(BASE_HF_IMG_SPLIT_DF)
# When
result = self.runner.invoke(cli.main,
[SAMPLE_HF_CSV, "--split"])
result_img = cv2.imread("HG00420.png")
# Then
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.isfile("HG00420.png"))
diff = cv2.subtract(base_img, result_img)
        self.assertFalse(np.any(diff))
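        # Cleanup (truncated in the dump; mirrors every sibling test above)
        os.remove("HG00420.png")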
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy import constant
from einsteinpy.metric import BaseMetric, Kerr, KerrNewman
_c = constant.c.value
_G = constant.G.value
_Cc = constant.coulombs_const.value
@pytest.fixture()
def test_input():
"""
Test input for some functions below
"""
r = 0.1
theta = 4 * np.pi / 5
M = 1e23
a = 0.99
return r, theta, M, a
def test_compare_kerr_kerrnewman_dmetric_dx(test_input):
"""
Tests, if the metric derivatives for Kerr & Kerr-Newman metrics match, when Q -> 0
"""
r, theta, M, a = test_input
x_vec = np.array([0., r, theta, 0.])
mk = Kerr(coords="BL", M=M, a=a)
mkn = KerrNewman(coords="BL", M=M, a=a, Q=0.)
mkdx = mk._dg_dx_bl(x_vec)
mkndx = mkn._dg_dx_bl(x_vec)
assert_allclose(mkdx, mkndx, rtol=1e-10)
def test_christoffels_kerr_newman(test_input):
"""
Compares output produced by optimized function, with that, produced via general method (formula)
"""
r, theta, M, a = test_input
Q = 1.0
x_vec = np.array([0., r, theta, 0.])
# Output produced by the optimized function
mkn = KerrNewman(coords="BL", M=M, a=a, Q=Q)
chl1 = mkn.christoffels(x_vec)
# Calculated using formula
g_contra = mkn.metric_contravariant(x_vec)
dgdx = mkn._dg_dx_bl(x_vec)
chl2 = np.zeros(shape=(4, 4, 4), dtype=float)
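    # General formula for Christoffel symbols of the 2nd kind:
    #   Gamma^i_{kl} = (1/2) * g^{im} * (dg_{mk}/dx^l + dg_{ml}/dx^k - dg_{kl}/dx^m)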
    tmp = np.arange(4 ** 3)
for t in tmp:
i = int(t / (4 ** 2)) % 4
k = int(t / 4) % 4
index = t % 4
for m in range(4):
chl2[i, k, index] += g_contra[i, m] * (
dgdx[index, m, k] + dgdx[k, m, index] - dgdx[m, k, index]
)
chl2 = np.multiply(chl2, 0.5)
assert_allclose(chl2, chl1, rtol=1e-10)
def test_compare_kerr_kerrnewman_christoffels(test_input):
"""
Compares KerrNewman Christoffel Symbols, with that of Kerr metric, when Q -> 0
"""
r, theta, M, a = test_input
x_vec = np.array([0., r, theta, 0.])
mk = Kerr(coords="BL", M=M, a=a)
mkn = KerrNewman(coords="BL", M=M, a=a, Q=0.)
mk_chl = mk.christoffels(x_vec)
mkn_chl = mkn.christoffels(x_vec)
assert_allclose(mk_chl, mkn_chl, rtol=1e-8)
def test_electromagnetic_potential_from_em_potential_vector(test_input):
"""
Tests, if the calculated EM 4-Potential is the same as that from the formula
"""
r, theta, M, a = test_input
Q = 15.5
# Using function from module
mkn = KerrNewman(coords="BL", M=M, a=a, Q=Q)
mkn_pot = mkn.em_potential_covariant(r, theta, M=M, a=0., Q=Q)
# Calculated using formula
calc_pot = np.zeros((4,), dtype=float)
calc_pot[0] = (Q / ((_c ** 2) * r)) * np.sqrt(_G * _Cc)
assert_allclose(mkn_pot, calc_pot, rtol=1e-8)
def test_electromagnetic_potential_contravariant(test_input):
"""
Tests, if the calculated EM 4-Potential, in contravariant form, is the same as that \
calculated manually
"""
r, theta, M, a = test_input
Q = 15.5
x_vec = np.array([0., r, theta, 0.], dtype=float)
mkn = KerrNewman(coords="BL", M=M, a=a, Q=Q)
mkn_contra_mat = mkn.metric_contravariant(x_vec)
# Using function from module
mkn_pot_contra = mkn.em_potential_contravariant(r, theta, M=M, a=a, Q=Q)
# Calculated using formula
alpha = mkn.alpha(M, a)
rho2 = mkn.rho(r, theta, M, a) ** 2
r_Q = np.sqrt((Q ** 2 * _G * _Cc) / _c ** 4)
fac = r * r_Q / rho2
calc_pot_cov = np.array([fac, 0., 0., -alpha * np.sin(theta)**2 * fac], dtype=float)
calc_pot_contra = mkn_contra_mat @ calc_pot_cov
    assert_allclose(mkn_pot_contra, calc_pot_contra, rtol=1e-8)
import numpy as np
import vg
def test_average():
np.testing.assert_array_equal(
vg.average(np.array([[1.0, 2.0, 3.0], [-6.0, -9.0, -15.0]])),
np.array([-2.5, -3.5, -6.0]),
)
np.testing.assert_array_equal(
        vg.average(np.array([[1.0, 2.0, 3.0], [-6.0, -9.0, -15.0]])  # truncated in the source dump: the remaining arguments and expected array are missing
import numpy as np
import scipy.special as special
import scipy.spatial.distance as distfuncs
def cart2sph(x, y, z):
"""Conversion from Cartesian to spherical coordinates
Parameters
------
x, y, z : Position in Cartesian coordinates
Returns
------
phi, theta, r: Azimuth angle, zenith angle, distance
"""
    r_xy = np.sqrt(x**2 + y**2)
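    # NOTE: the source dump is truncated here; the conversion implied by the
    # docstring completes as follows.
    phi = np.arctan2(y, x)
    theta = np.arctan2(r_xy, z)
    r = np.sqrt(x**2 + y**2 + z**2)
    return phi, theta, r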
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import schirm_tools as schirm
import experiments as expe
data_path = './data/'
result_path = './results/'
result_path_comb = './results/'
compute = False
Mall = np.array([2, 3, 4, 5, 6, 7, 8, 12, 16, 20])
# ==============================================================================
#
# 888b d888 8888888 8888888b. d8888 .d8888b.
# 8888b d8888 888 888 "Y88b d88888 d88P Y88b
# 88888b.d88888 888 888 888 d88P888 Y88b.
# 888Y88888P888 888 888 888 d88P 888 "Y888b.
# 888 Y888P 888 888 888 888 d88P 888 "Y88b.
# 888 Y8P 888 888 888 888 d88P 888 "888
# 888 " 888 888 888 .d88P d8888888888 Y88b d88P
# 888 888 8888888 8888888P" d88P 888 "Y8888P"
#
# --- Multiple Imputation with Denoising Autoencoders
# Copyright 2020 <NAME>, <NAME>, and <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import mean_squared_error as mse
import random
class tfVersionError(Exception):
pass
if tf.__version__[0] == '2':
raise tfVersionError("midas v1.0 is currently only compatible with TensorFlow 1.X")
elif tf.__version__[0] == '1':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class Midas(object):
def __init__(self,
layer_structure= [256, 256, 256],
learn_rate= 1e-4,
input_drop= 0.8,
train_batch = 16,
savepath= 'tmp/MIDAS',
seed= None,
output_layers= 'reversed',
loss_scale= 1,
init_scale= 1,
vae_layer= False,
individual_outputs= False,
manual_outputs= False,
output_structure= [16, 16, 32],
latent_space_size = 4,
cont_adj= 1.0,
binary_adj= 1.0,
softmax_adj= 1.0,
dropout_level = 0.5,
weight_decay = 'default',
vae_alpha = 1.0,
act = tf.nn.elu,
vae_sample_var = 1.0,
noise_type = 'bernoulli',
kld_min = 0.01
):
"""
Initialiser. Called separately to 'build_model' to allow for out-of-memory
datasets. All key hyperparameters are entered at this stage, as the model
construction methods only deal with the dataset.
Args:
layer_structure: List of integers. The number of nodes in each layer of the
network (default = [256, 256, 256], denoting a three-layer network with 256
nodes per layer). Larger networks can learn more complex data structures but
require longer training and are more prone to overfitting.
learn_rate: Float. The learning rate (gamma; default = 0.0001), which
controls the size of the weight adjustment in each training epoch. In general,
higher values reduce training time at the expense of less accurate results.
input_drop: Float between 0 and 1. The probability of corruption for input
columns in training mini-batches (default = 0.8). Higher values increase
training time but reduce the risk of overfitting. In our experience, values
between 0.7 and 0.95 deliver the best performance.
train_batch: Integer. The number of observations in training mini-batches
(default = 16). Common choices are 8, 16, 32, 64, and 128; powers of 2 tend to
enhance memory efficiency. In general, smaller sizes lead to faster convergence
at the cost of greater noise and thus less accurate estimates of the error
gradient. Where memory management is a concern, they should be favored.
savepath: String. The location to which the trained model will be saved.
seed: Integer. The value to which Python's pseudo-random number
generator is initialized. This enables users to ensure that data shuffling,
weight and bias initialization, and missingness indicator vectors are
reproducible.
loss_scale: Float. A constant by which the RMSE loss functions are multiplied
(default = 1). This hyperparameter performs a similar function to the learning
rate. If loss during training is very large, increasing its value can help to
prevent overtraining.
init_scale: Float. The numerator of the variance component of Xavier Initialisation
equation (default = 1). In very deep networks, higher values may help to prevent
extreme gradients (though this problem is less common with ELU activation functions).
softmax_adj: Float. A constant by which the cross-entropy loss functions are
multiplied (default = 1). This hyperparameter is the equivalent of loss_scale
for categorical variables. If cross-entropy loss falls at a consistently faster
rate than RMSE during training, a lower value may help to redress this imbalance.
vae_layer: Boolean. Specifies whether to include a variational autoencoder layer in
the network (default = False), one of the key diagnostic tools included in midas.
If set to true, variational autoencoder hyperparameters must be specified via a number
of additional arguments.
latent_space_size: Integer. The number of normal dimensions used to parameterize the
latent space when vae_layer = True.
vae_sample_var: Float. The sampling variance of the normal distributions used to
parameterize the latent space when vae_layer = True.
vae_alpha: Float. The strength of the prior imposed on the Kullback-Leibler divergence term
in the variational autoencoder loss functions.
kld_min: Float. The minimum value of the Kullback-Leibler divergence term in the variational
autoencoder loss functions.
Returns:
Self
"""
if type(layer_structure) == list:
self.layer_structure = layer_structure
else:
raise ValueError("Layer structure must be specified within a list")
if type(output_layers) == list:
self.output_layers = output_layers
elif output_layers == 'reversed':
self.output_layers = layer_structure.copy()
self.output_layers.reverse()
else:
raise ValueError("Please specify correct output layer structure")
self.learn_rate = learn_rate
self.input_drop = input_drop
self.model_built = False
self.savepath = savepath
self.model = None
self.additional_data = None
self.train_batch = train_batch
self.seed = seed
self.input_is_pipeline = False
self.input_pipeline = None
self.vae_layer = vae_layer
self.loss_scale = loss_scale
self.init_scale = init_scale
self.individual_outputs = individual_outputs
self.manual_outputs = manual_outputs
self.vae_sample_var = vae_sample_var
self.latent_space_size = latent_space_size
self.dropout_level = dropout_level
self.prior_strength = vae_alpha
self.kld_min = kld_min
if weight_decay == 'default':
self.weight_decay = 'default'
elif type(weight_decay) == float:
self.weight_decay = weight_decay
else:
raise ValueError("Weight decay argument accepts either 'standard' (string) "\
"or floating point")
if type(output_structure) == int:
self.output_structure = [output_structure]*3
        elif individual_outputs or (len(output_structure) == 3):
self.output_structure = output_structure
else:
raise TypeError("The output transform assignment must take the form of "\
"an integer, a list of three elements (cont, bin, cat), "\
"or individual values must be specified.")
self.cont_adj = cont_adj
self.binary_adj = binary_adj
self.softmax_adj = softmax_adj
self.act = act
self.noise_type = noise_type
if self.seed is not None:
np.random.seed(self.seed)
random.seed(self.seed)
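    # Example (illustrative only; the DataFrame and column names are hypothetical):
    #   >>> imputer = Midas(layer_structure=[128, 128], seed=89)
    #   >>> imputer.build_model(df, binary_columns=['smoker'],
    #   ...                     softmax_columns=[['region_a', 'region_b', 'region_c']])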
def _batch_iter(self,
train_data,
na_mask,
b_size = 16):
"""
Function for handling the batch feeds for training loops
"""
indices = np.arange(train_data.shape[0])
np.random.shuffle(indices)
for start_idx in range(0, train_data.shape[0] - b_size + 1, b_size):
excerpt = indices[start_idx:start_idx + b_size]
if self.additional_data is None:
yield train_data[excerpt], na_mask[excerpt]
else:
yield train_data[excerpt], na_mask[excerpt], self.additional_data.values[excerpt]
def _batch_iter_output(self,
train_data,
b_size = 256):
"""
Identical to _batch_iter(), although designed for a single datasource
"""
indices = np.arange(train_data.shape[0])
for start_idx in range(0, train_data.shape[0], b_size):
excerpt = indices[start_idx:start_idx + b_size]
if self.additional_data is None:
yield train_data[excerpt]
else:
yield train_data[excerpt], self.additional_data.values[excerpt]
def _batch_iter_zsample(self,
data,
b_size = 256):
"""
Identical to _batch_iter(), although designed for sampling from latent
"""
indices = np.arange(data.shape[0])
for start_idx in range(0, data.shape[0], b_size):
excerpt = indices[start_idx:start_idx + b_size]
yield data[excerpt]
def _build_layer(self,
X,
weight_matrix,
bias_vec,
dropout_rate = 0.5,
output_layer= False):
"""
Constructs layers for the build function
"""
X_tx = tf.matmul(tf.nn.dropout(X, dropout_rate), weight_matrix) + bias_vec
if output_layer:
return X_tx
else:
return self.act(X_tx)
def _build_variables(self,
weights,
biases,
num_in,
num_out,
scale= 1):
"""
Custom initialiser for a weights, using a variation on Xavier initialisation
with smaller starting weights. Allows for faster convergence on low learn
rates, useful in the presence of multiple loss functions
"""
weights.append(tf.Variable(tf.truncated_normal([num_in, num_out],
mean = 0,
stddev = scale / np.sqrt(num_in + num_out))))
biases.append(tf.Variable(tf.zeros([num_out]))) #Bias can be zero
return weights, biases
def _sort_cols(self,
data,
subset):
"""
This function is used to sequence the columns of the dataset, so as to be in
the order [Continuous data], [Binary data], [Categorical data]. It simply
rearranges a column, done functionally to minimise memory overhead
"""
data_1 = data[subset]
data_0 = data.drop(subset, axis= 1)
chunk = data_1.shape[1]
return pd.concat([data_0, data_1], axis= 1), chunk
def build_model(self,
imputation_target,
binary_columns= None,
softmax_columns= None,
unsorted= True,
additional_data = None,
verbose= True,
):
"""
This method is called to construct the neural network that is the heart of
MIDAS. This includes the assignment of loss functions to the appropriate
data types.
THIS FUNCTION MUST BE CALLED BEFORE ANY TRAINING OR IMPUTATION OCCURS. Failing
to do so will simply raise an error.
The categorical columns should be a list of column names. Softmax columns
should be a list of lists of column names. This will allow the model to
dynamically assign cost functions to the correct variables. If, however,
the data comes pre-sorted, arranged can be set to "true", in which case
the arguments can be passed in as integers of size, ie. shape[1] attributes
for each of the relevant categories.
In other words, if you're experienced at using MIDAS and understand how its
indexing works, pre-sort your data and pass in the integers so specifying
reindexing values doesn't become too onerous.
Alternatively, list(df.columns.values) will output a list of column names,
which can be easily implemented in the 'for' loop which constructs your dummy
variables.
Args:
imputation_target: DataFrame. The name of the incomplete input dataset.
Upon being read in, the dataset will be appropriately formatted and stored
for training.
binary_columns: List of names. A list of all binary variables in the input
dataset.
softmax_columns: List of lists. The outer list should include all non-binary
categorical variables in the input dataset. Each inner list should contain
the mutually exclusive set of possible classes for each of these variables.
unsorted: Boolean. Specifies whether the input dataset has been pre-ordered
in terms of variable type (default = True, denoting no sorting). If
set to False, binary_columns and softmax_columns should be a list of integers
denoting shape attributes for each category.
additional_data: DataFrame. Data that should be included in the imputation
model but are not required for later analyses. Such data will not be
formatted, rearranged, or included in the loss functions, reducing training
time.
verbose: Boolean. Specifies whether to print messages to the terminal
(default = True).
Returns:
Self
"""
if not isinstance(imputation_target, pd.DataFrame):
raise TypeError("Input data must be in a DataFrame")
if imputation_target.isnull().sum().sum() == 0:
raise ValueError("Imputation target contains no missing values. Please"\
" ensure missing values are encoded as type np.nan")
self.original_columns = imputation_target.columns
cont_exists = False
cat_exists = False
in_size = imputation_target.shape[1]
if additional_data is not None:
add_size = additional_data.shape[1]
else:
add_size = 0
# Establishing indices for cost function
size_index = []
if binary_columns is not None:
if unsorted:
imputation_target, chunk = self._sort_cols(imputation_target,
binary_columns)
size_index.append(chunk)
else:
size_index.append(binary_columns)
cat_exists = True
if softmax_columns is not None:
if unsorted:
for subset in softmax_columns:
imputation_target, chunk = self._sort_cols(imputation_target,
subset)
size_index.append(chunk)
else:
for digit in softmax_columns:
size_index.append(digit)
if sum(size_index) < in_size:
chunk = in_size - sum(size_index)
size_index.insert(0, chunk)
cont_exists = True
if not sum(size_index) == in_size:
raise ValueError("Sorting columns has failed")
if verbose:
print("Size index:", size_index)
#Commit some variables to the instance of the class
self.size_index = size_index
if not self.input_is_pipeline:
self.na_matrix = imputation_target.notnull().astype(bool)
self.imputation_target = imputation_target.fillna(0)
if additional_data is not None:
self.additional_data = additional_data.fillna(0)
#Build graph
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
if self.seed is not None:
tf.set_random_seed(self.seed)
#Placeholders
self.X = tf.placeholder(tf.float32, [None, in_size])
self.na_idx = tf.placeholder(tf.bool, [None, in_size])
if additional_data is not None:
self.X_add = tf.placeholder(tf.float32, [None, add_size])
if self.vae_layer:
self.latent_inputs = tf.placeholder(tf.float32, [None, self.latent_space_size])
#Build list for determining input and output structures
struc_list = self.layer_structure.copy()
struc_list.insert(0, in_size + add_size)
outputs_struc = []
for n in range(len(size_index)):
if n == 0:
if cont_exists:
outputs_struc += ["cont"]*size_index[n]
elif cat_exists:
outputs_struc += ["bin"]*size_index[n]
else:
outputs_struc += [size_index[n]]
elif n == 1:
if cont_exists and cat_exists:
outputs_struc += ["bin"]*size_index[n]
else:
outputs_struc += [size_index[n]]
else:
outputs_struc += [size_index[n]]
if self.manual_outputs:
output_layer_size = np.sum(self.output_structure)
output_layer_structure = self.output_structure
else:
output_layer_structure = []
for item in outputs_struc:
if item == "cont":
output_layer_structure.append(self.output_structure[0])
if item == "bin":
output_layer_structure.append(self.output_structure[1])
if type(item) == int:
output_layer_structure.append(self.output_structure[2])
output_layer_size = np.sum(output_layer_structure)
#Instantiate and initialise variables
_w = []
_b = []
_zw = []
_zb = []
_ow = []
_ob = []
#Input, denoising
for n in range(len(struc_list) -1):
_w, _b = self._build_variables(weights= _w, biases= _b,
num_in= struc_list[n],
num_out= struc_list[n+1],
scale= self.init_scale)
if self.vae_layer:
mapped_dist = tf.distributions.Normal(tf.constant(0.),
tf.constant(self.vae_sample_var))
# mapped_dist = tf.distributions.StudentT(tf.constant(3.0),
# tf.constant(0.0),
# tf.constant(1.0))
#Latent state, variance
_zw, _zb = self._build_variables(weights= _zw, biases= _zb,
num_in= struc_list[-1],
num_out= self.latent_space_size*2,
scale= self.init_scale)
_zw, _zb = self._build_variables(weights= _zw, biases= _zb,
num_in= self.latent_space_size,
num_out= self.output_layers[0],
scale= self.init_scale)
t_l = len(self.output_layers)
#Output, specialisation
assert len(output_layer_structure) == len(outputs_struc)
output_split = []
if self.individual_outputs:
self.output_layers.append(output_layer_size)
for n in range(t_l):
_ow, _ob = self._build_variables(weights= _ow, biases= _ob,
num_in= self.output_layers[n],
num_out= self.output_layers[n+1],
scale= self.init_scale)
for n in range(len(outputs_struc)):
if type(outputs_struc[n]) == str:
_ow, _ob = self._build_variables(weights= _ow, biases= _ob,
num_in= output_layer_structure[n],
num_out= 1,
scale= self.init_scale)
output_split.append(1)
elif type(outputs_struc[n]) == int:
_ow, _ob = self._build_variables(weights= _ow, biases= _ob,
num_in= output_layer_structure[n],
num_out= outputs_struc[n],
scale= self.init_scale)
output_split.append(outputs_struc[n])
else:
self.output_layers.append(in_size)
for n in range(t_l):
_ow, _ob = self._build_variables(weights= _ow, biases= _ob,
num_in= self.output_layers[n],
num_out= self.output_layers[n+1])
for n in range(len(outputs_struc)):
if type(outputs_struc[n]) == str:
output_split.append(1)
elif type(outputs_struc[n]) == int:
output_split.append(outputs_struc[n])
#Build the neural network. Each layer is determined by the struc list
def denoise(X):
#Input tx
for n in range(len(struc_list) -1):
if n == 0:
if self.noise_type == 'bernoulli':
X = self._build_layer(X, _w[n], _b[n],
dropout_rate = self.input_drop)
elif self.noise_type == 'gaussian':
X = X + tf.distributions.Normal(loc=tf.constant(0.),
scale = tf.constant(self.input_drop)).sample(sample_shape= tf.shape(X))
X = self._build_layer(X, _w[n], _b[n],
dropout_rate = self.input_drop)
else:
X = self._build_layer(X, _w[n], _b[n],
dropout_rate = self.dropout_level)
return X
if self.vae_layer:
def to_z(X):
#Latent tx
X = self._build_layer(X, _zw[0], _zb[0], dropout_rate = self.dropout_level,
output_layer= True)
x_mu, x_log_sigma = tf.split(X, [self.latent_space_size]*2, axis=1)
return x_mu, x_log_sigma
def from_z(z):
#Joint transform
X = self._build_layer(z, _zw[1], _zb[1], dropout_rate= 1)
return X
def vae(X, output=False):
x_mu, x_log_sigma = to_z(X)
if output:
reparam_z = mapped_dist.sample(sample_shape= tf.shape(x_mu))
# reparam_z = tf.random_normal(tf.shape(x_mu))
else:
reparam_z = tf.random_normal(tf.shape(x_mu))
z = x_mu + reparam_z * tf.exp(x_log_sigma)
kld = tf.maximum(tf.reduce_mean(1 + 2*x_log_sigma - x_mu**2 - tf.exp(2*x_log_sigma),
axis=1)*self.prior_strength * - 0.5,
self.kld_min)
X = from_z(z)
return X, kld
if self.individual_outputs:
def decode(X):
for n in range(t_l):
X = self._build_layer(X, _ow[n], _ob[n], dropout_rate= self.dropout_level)
#Output tx
base_splits = tf.split(X, output_layer_structure, axis=1)
decombined = []
for n in range(len(outputs_struc)):
decombined.append(self._build_layer(base_splits[n], _ow[n+t_l], _ob[n+t_l],
dropout_rate = self.dropout_level,
output_layer= True))
return decombined
else:
def decode(X):
for n in range(t_l):
if n == t_l-1:
X = self._build_layer(X, _ow[n], _ob[n],
dropout_rate = self.dropout_level,
output_layer= True)
else:
X = self._build_layer(X, _ow[n], _ob[n],
dropout_rate = self.dropout_level)
decombined = tf.split(X, output_split, axis=1)
return decombined
if self.vae_layer:
def decode_z(z):
X = from_z(z)
X = decode(X)
return X
#Determine which imputation function is to be used. This is constructed to
#take advantage of additional data provided.
if additional_data is not None:
encoded = denoise(tf.concat([self.X, self.X_add], axis= 1))
else:
encoded = denoise(self.X)
if self.vae_layer:
perturb, kld = vae(encoded)
perturb_out, _ = vae(encoded, True)
pred_split = decode(perturb)
out_split = decode(perturb_out)
else:
pred_split = decode(encoded)
#Output functions
cost_list = []
self.output_types = []
#Build L2 loss and KL-Divergence
if self.weight_decay == 'default':
lmbda = 1/self.imputation_target.shape[0]
else:
lmbda = self.weight_decay
# if self.vae_layer:
# l2_penalty = tf.multiply(tf.reduce_mean(
# [tf.nn.l2_loss(w) for w in _w]+\
# [tf.nn.l2_loss(w) for w in _zw]+\
# [tf.nn.l2_loss(w) for w in _ow]
# ), lmbda)
# else:
# l2_penalty = tf.multiply(tf.reduce_mean(
# [tf.nn.l2_loss(w) for w in _w]+\
# [tf.nn.l2_loss(w) for w in _ow]
# ), lmbda)
#Assign cost and loss functions
na_split = tf.split(self.na_idx, output_split, axis=1)
true_split = tf.split(self.X, output_split, axis=1)
for n in range(len(outputs_struc)):
na_adj = tf.cast(tf.count_nonzero(na_split[n]),tf.float32)\
/tf.cast(tf.size(na_split[n]),tf.float32)
if outputs_struc[n] == 'cont':
if 'rmse' not in self.output_types:
self.output_types.append('rmse')
cost_list.append(tf.sqrt(
tf.losses.mean_squared_error(tf.boolean_mask(true_split[n], na_split[n]),
tf.boolean_mask(pred_split[n], na_split[n])\
))*self.cont_adj * na_adj)
elif outputs_struc[n] == 'bin':
if 'bacc' not in self.output_types:
self.output_types.append('bacc')
cost_list.append(
tf.losses.sigmoid_cross_entropy(tf.boolean_mask(true_split[n], na_split[n]),
tf.boolean_mask(pred_split[n], na_split[n]))\
*self.binary_adj * na_adj)
elif type(outputs_struc[n]) == int:
self.output_types.append('sacc')
cost_list.append(tf.losses.softmax_cross_entropy(
tf.reshape(tf.boolean_mask(true_split[n], na_split[n]), [-1, outputs_struc[n]]),
tf.reshape(tf.boolean_mask(pred_split[n], na_split[n]), [-1, outputs_struc[n]]))\
*self.softmax_adj *na_adj)
def output_function(out_split):
output_list = []
#Break outputs into their parts
for n in range(len(outputs_struc)):
if outputs_struc[n] == 'cont':
output_list.append(out_split[n])
elif outputs_struc[n] == 'bin':
output_list.append(tf.nn.sigmoid(out_split[n]))
elif type(outputs_struc[n]) == int:
output_list.append(tf.nn.softmax(out_split[n]))
return tf.concat(output_list, axis= 1)
self.outputs_struc = outputs_struc
if self.vae_layer:
self.output_op = output_function(out_split)
self.joint_loss = tf.reduce_mean(tf.reduce_sum(cost_list) + kld)# + l2_penalty)
self.encode_to_z = to_z(encoded)
self.gen_from_z_sample = output_function(decode_z(mapped_dist.sample(
sample_shape= tf.shape(self.latent_inputs))))
self.gen_from_z_inputs = output_function(decode_z(self.latent_inputs))
else:
self.output_op = output_function(pred_split)
self.joint_loss = tf.reduce_mean(tf.reduce_sum(cost_list))# + l2_penalty)
optim = tf.contrib.opt.AdamWOptimizer(lmbda, self.learn_rate)
self.train_step = optim.minimize(self.joint_loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.model_built = True
if verbose:
print()
print("Computation graph constructed")
print()
return self
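# --- Usage sketch (illustrative only) ---
# A minimal, hypothetical workflow for build_model(). The column names and
# the constructor arguments below are assumptions for illustration, based on
# attributes referenced elsewhere in this class (e.g. layer_structure, seed).
#
# import pandas as pd
# df = pd.read_csv("my_data.csv") # missing values encoded as np.nan
# softmax_cols = []
# for col in ["occupation", "region"]: # hypothetical categorical variables
# dummies = pd.get_dummies(df[col], prefix=col)
# df = pd.concat([df.drop(col, axis=1), dummies], axis=1)
# softmax_cols.append(list(dummies.columns.values))
# imputer = Midas(layer_structure=[256, 256], seed=42)
# imputer.build_model(df, binary_columns=["employed"], softmax_columns=softmax_cols)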
def train_model(self,
training_epochs= 100,
verbose= True,
verbosity_ival= 1,
excessive= False):
"""
This is the standard method for optimising the model's parameters. Must be
called before imputation can be performed.
Args:
training_epochs: Integer. The number of complete cycles (forward passes)
through the network during training (default = 100).
verbose: Boolean. Specifies whether to print messages to the terminal
during training, including loss values (default = True).
verbosity_ival: Integer. The number of training epochs between messages
(default = 1).
excessive: Boolean. Specifies whether to print loss for each mini-batch
to the terminal (default = False), which can help with
troubleshooting.
Returns:
Self. Model is automatically saved upon reaching specified number of epochs
"""
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if self.input_is_pipeline:
raise AttributeError("Model was constructed to accept pipeline data, either"\
" use 'train_model_pipeline' method or rebuild model "\
"with in-memory dataset.")
feed_data = self.imputation_target.values
na_loc = self.na_matrix.values
with tf.Session(graph= self.graph) as sess:
sess.run(self.init)
if verbose:
print("Model initialised")
print()
for epoch in range(training_epochs):
count = 0
run_loss = 0
for batch in self._batch_iter(feed_data, na_loc, self.train_batch):
if np.sum(batch[1]) == 0:
continue
feedin = {self.X: batch[0], self.na_idx: batch[1]}
if self.additional_data is not None:
feedin[self.X_add] = batch[2]
loss, _ = sess.run([self.joint_loss, self.train_step],
feed_dict= feedin)
if excessive:
print("Current cost:", loss)
count +=1
if not np.isnan(loss):
run_loss += loss
if verbose:
if epoch % verbosity_ival == 0:
print('Epoch:', epoch, ", loss:", str(run_loss/count))
if verbose:
print("Training complete. Saving file...")
save_path = self.saver.save(sess, self.savepath)
if verbose:
print("Model saved in file: %s" % save_path)
return self
def generate_samples(self,
m= 50,
verbose= True):
"""
Method used to generate a set of m imputations to the .output_list attribute.
Imputations are stored within a list in memory, and can be accessed in any
order.
If a model has been pre-trained, on subsequent runs this function can be
directly called without having to train first. An 'if' statement checking
the default save location is useful for this.
Args:
m: Integer. The number of completed datasets to produce (default = 50)
verbose: Boolean. Specifies whether to print messages to the terminal
(default = True).
Returns:
Self
"""
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if self.input_is_pipeline:
raise AttributeError("Model was constructed to accept pipeline data, either"\
" use 'pipeline_yield_samples' method or rebuild model "\
"with in-memory dataset.")
self.output_list = []
with tf.Session(graph= self.graph) as sess:
self.saver.restore(sess, self.savepath)
if verbose:
print("Model restored.")
for n in range(m):
feed_data = self.imputation_target.values
feedin = {self.X: feed_data}
if self.additional_data is not None:
feedin[self.X_add] = self.additional_data
y_out = pd.DataFrame(sess.run(self.output_op,
feed_dict= feedin),
columns= self.imputation_target.columns)
output_df = self.imputation_target.copy()
output_df[np.invert(self.na_matrix)] = y_out[np.invert(self.na_matrix)]
self.output_list.append(output_df)
return self
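# --- Usage sketch (illustrative only) ---
# The docstring's suggested pattern for skipping training when a saved model
# already exists; checking savepath + ".index" assumes the standard TF1
# checkpoint file layout.
#
# import os
# if not os.path.exists(imputer.savepath + ".index"):
# imputer.train_model(training_epochs=100)
# imputer.generate_samples(m=10)
# completed_datasets = imputer.output_list # list of 10 completed DataFrames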
def yield_samples(self,
m= 50,
verbose= True):
"""
Method used to generate a set of m imputations via the 'yield' command, allowing
imputations to be used in a 'for' loop.
If a model has been pre-trained, on subsequent runs this function can be
directly called without having to train first. An 'if' statement checking
the default save location is useful for this.
Args:
m: Integer. Number of imputations to generate.
verbose: Boolean. Prints out messages.
Returns:
Self
"""
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if self.input_is_pipeline:
raise AttributeError("Model was constructed to accept pipeline data, either"\
" use 'pipeline_yield_samples' method or rebuild model "\
"with in-memory dataset.")
with tf.Session(graph= self.graph) as sess:
self.saver.restore(sess, self.savepath)
if verbose:
print("Model restored.")
for n in range(m):
feed_data = self.imputation_target.values
feedin = {self.X: feed_data}
if self.additional_data is not None:
feedin[self.X_add] = self.additional_data
y_out = pd.DataFrame(sess.run(self.output_op,
feed_dict= feedin),
columns= self.imputation_target.columns)
output_df = self.imputation_target.copy()
output_df[np.invert(self.na_matrix)] = y_out[np.invert(self.na_matrix)]
yield output_df
return self
def batch_generate_samples(self,
m= 50,
b_size= 256,
verbose= True):
"""
Method used to generate a set of m imputations to the .output_list attribute.
Imputations are stored within a list in memory, and can be accessed in any
order. As batch generation implies very large datasets, this method is only
provided for completeness' sake.
This function is for a dataset large enough to be stored in memory, but
too large to be passed into the model in its entirety. This may be due to
GPU memory limitations, or just the size of the model.
If a model has been pre-trained, on subsequent runs this function can be
directly called without having to train first. An 'if' statement checking
the default save location is useful for this.
Args:
m: Integer. Number of imputations to generate.
b_size: Integer. Number of data entries to process at once. For managing
wider datasets, smaller numbers may be required.
verbose: Boolean. Prints out messages.
Returns:
Self
"""
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if self.input_is_pipeline:
raise AttributeError("Model was constructed to accept pipeline data, either"\
" use 'pipeline_yield_samples' method or rebuild model "\
"with in-memory dataset.")
self.output_list = []
with tf.Session(graph= self.graph) as sess:
self.saver.restore(sess, self.savepath)
if verbose:
print("Model restored.")
for n in range(m):
feed_data = self.imputation_target.values
minibatch_list = []
for batch in self._batch_iter_output(feed_data, b_size):
if self.additional_data is not None:
feedin = {self.X: batch[0], self.X_add: batch[1]}
else:
feedin = {self.X: batch}
y_batch = pd.DataFrame(sess.run(self.output_op,
feed_dict= feedin),
columns= self.imputation_target.columns)
minibatch_list.append(y_batch)
y_out = pd.DataFrame(pd.concat(minibatch_list, ignore_index= True),
columns= self.imputation_target.columns)
output_df = self.imputation_target.copy()
output_df[np.invert(self.na_matrix)] = y_out[np.invert(self.na_matrix)]
self.output_list.append(output_df)
return self
def batch_yield_samples(self,
m= 50,
b_size= 256,
verbose= True):
"""
Method used to generate a set of m imputations via the 'yield' command, allowing
imputations to be used in a 'for' loop.
This function is for a dataset large enough to be stored in memory, but
too large to be passed into the model in its entirety. This may be due to
GPU memory limitations, or just the size of the model.
If a model has been pre-trained, on subsequent runs this function can be
directly called without having to train first. An 'if' statement checking
the default save location is useful for this.
Args:
m: Integer. Number of imputations to generate.
b_size: Integer. Number of data entries to process at once. For managing
wider datasets, smaller numbers may be required.
verbose: Boolean. Prints out messages.
Returns:
Self """
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if self.input_is_pipeline:
raise AttributeError("Model was constructed to accept pipeline data, either"\
" use 'pipeline_yield_samples' method or rebuild model "\
"with in-memory dataset.")
with tf.Session(graph= self.graph) as sess:
self.saver.restore(sess, self.savepath)
if verbose:
print("Model restored.")
for n in range(m):
feed_data = self.imputation_target.values
minibatch_list = []
for batch in self._batch_iter_output(feed_data, b_size):
if self.additional_data is not None:
feedin = {self.X: batch[0], self.X_add: batch[1]}
else:
feedin = {self.X: batch}
y_batch = pd.DataFrame(sess.run(self.output_op,
feed_dict= feedin),
columns= self.imputation_target.columns)
minibatch_list.append(y_batch)
y_out = pd.DataFrame(pd.concat(minibatch_list, ignore_index= True),
columns= self.imputation_target.columns)
output_df = self.imputation_target.copy()
output_df[np.invert(self.na_matrix)] = y_out[np.invert(self.na_matrix)]
yield output_df
return self
def overimpute(self,
spikein = 0.1,
training_epochs= 100,
report_ival = 10,
report_samples = 32,
plot_vars= True,
verbose= True,
verbosity_ival= 1,
spike_seed= 42,
cont_kdes = False,
excessive= False,
plot_main = True,
skip_plot = False,
):
"""
This function spikes in additional missingness, so that known values can be
used to help adjust the complexity of the model. As conventional train/
validation splits can still lead to autoencoders overtraining, the method for
limiting complexity is overimputation and early stopping. This gives an
estimate of how the model will react to unseen variables.
Error is defined as RMSE for continuous variables, and classification error
for binary and categorical variables (i.e. 1 - accuracy). Note that this means
that binary classification is inherently dependent on a selection threshold
of 0.5, and softmax accuracy will automatically decrease as a function of the
number of classes within the model. All three will be affected by the degree
of imbalance within the dataset.
The accuracy measures provided here may not be ideal for all problems, but
they are generally appropriate for selecting optimum complexity. Should the
lines denoting error begin to trend upwards, this indicates overtraining and
is a sign that the training_epochs parameter to the .train_model() method should
be capped before this point.
The actual optimal point may differ from that indicated by the .overimpute()
method for two reasons:
-The missingness that is spiked in reduces the overall data available to the
algorithm to learn the inherent patterns, so there should be some improvement
in performance when .train_model() is called. If this is a concern, it should
be possible to compare the behaviour of the loss figures between .train_model()
and .overimpute().
-The missingness inherent to the data may depend on some unobserved factor.
In this case, the bias in the observed data may lead to inaccurate inference.
It is worth visually inspecting the distribution of the overimputed values
against imputed values (using plot_vars) to ensure that they fall within a
sensible range.
The plots block execution of the code until they are closed. To only plot a
single overimputation graph at the end of the run, you can supply plot_main = False
and plot_vars = False. To run the imputation without plotting any graphs,
set skip_plot = True in addition. The overimputation function will still print
predicted errors to the console.
Args:
spikein: Float, between 0 and 1. The proportion of observed values in the
input dataset to be randomly removed (default = 0.1).
training_epochs: Integer. The number of overimputation training epochs
(default = 100). Selecting a low value increases the risk that trends in the
loss metrics have not stabilized by the end of training, in which case
additional epochs may be necessary.
report_ival: Integer. The number of overimputation training epochs between
calculations of loss (default = 10). Shorter intervals provide a more granular
view of model performance but slow down the overimputation process.
report_samples: The number of Monte Carlo samples drawn from the estimated
missing-data posterior for loss calculations (default = 32). A larger number
increases overimputation runtime and may thus necessitate a higher value of
report_ival.
plot_vars: Specifies whether to plot the distribution of original versus
overimputed values (default = True). This takes the form of a density
plot for continuous variables and a barplot for categorical variables (showing
proportions of each class).
plot_main: Boolean. Specifies whether to display the main graphical output
(overimputation error during training) at every reporting interval (default = True).
If set to False, it will only appear at the end of the overimputation training
process. Error values are still shown at each report_ival.
skip_plot: Boolean. Specifies whether to suppress the main graphical output
(default = False). This may be desirable when users are conducting multiple
overimputation exercises sequentially and are primarily interested in the console
output.
verbose: Boolean. Prints out messages, including loss, to the terminal (default = True).
verbosity_ival: Integer. The number of overimputation training epochs between
messages (default = 1).
spike_seed: Integer. The value to which Python's pseudo-random number generator is initialized
for the missingness spike-in. This is separate from the seed specified in the Midas()
call.
excessive: Specifies whether to print aggregate mini-batch loss to the terminal
(default = False). This argument differs from the .train_model()'s excessive argument,
which prints individual mini-batch loss. This allows users to check for unusual imputations,
which may be helpful if loss is not declining during overimputation training.
"""
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if self.input_is_pipeline:
raise AttributeError("Overimputation not currently supported for models"\
" which use a pipeline function for input.")
#These values simplify control flow used later for error calculation and
#visualisation of convergence.
if cont_kdes and not plot_vars:
raise ValueError("Cannot plot KDEs if plot_vars is False")
if excessive:
import time
rmse_in = False
sacc_in = False
bacc_in = False
if 'rmse' in self.output_types:
rmse_in = True
if 'sacc' in self.output_types:
def sacc(true, pred, spike): #Softmax accuracy
a = np.argmax(true, 1)
b = np.argmax(pred, 1)
return np.sum(a[spike.flatten()] == b[spike.flatten()]) / np.sum(spike)
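# findcatname recovers the original variable name by taking the longest
# common prefix shared by a softmax variable's one-hot column names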
def findcatname(strlist):
return strlist[0][:([min([x[0]==elem for elem in x]) \
for x in zip(*strlist)]+[0]).index(0)]
sacc_in = True
if 'bacc' in self.output_types:
def bacc(true, pred, spike):
pred = (pred > 0.5).astype(np.int_)
return np.sum(true[spike] == pred[spike]) / np.sum(spike)
bacc_in = True
feed_data = self.imputation_target.copy()
na_loc = self.na_matrix.copy() #Copy so the spike-in does not mutate self.na_matrix
np.random.seed(spike_seed)
n_softmax = 0 #Necessary to derive the average classification error
#Pandas lacks an equivalent to tf.split, so this is used to divide columns
#for their respective error metrics
break_list = list(np.cumsum(self.size_index))
break_list.insert(0, 0)
#Generate spike-in
spike = []
for n in range(len(self.size_index)):
if self.output_types[n] == 'sacc':
temp_spike = pd.Series(np.random.choice([True, False],
size= self.imputation_target.shape[0],
p= [spikein, 1-spikein]))
spike.append(pd.concat([temp_spike]*self.size_index[n], axis=1))
n_softmax += 1
else:
spike.append(pd.DataFrame(np.random.choice([True, False],
size= [self.imputation_target.shape[0],
self.size_index[n]],
p= [spikein, 1-spikein])))
spike = pd.concat(spike, axis= 1)
spike.columns = self.imputation_target.columns
spike[np.invert(na_loc)] = False
feed_data[spike] = 0
feed_data = feed_data.values
na_loc[spike] = False
spike = spike.values
na_loc = na_loc.values
#Initialise lists for plotting
s_rmse = []
a_rmse = []
s_bacc = []
a_bacc = []
s_sacc = []
a_sacc = []
with tf.Session(graph= self.graph) as sess:
sess.run(self.init)
print("Model initialised")
print()
for epoch in range(training_epochs + 1):
count = 0
run_loss = 0
for batch in self._batch_iter(feed_data, na_loc, self.train_batch):
if np.sum(batch[1]) == 0:
continue
feedin = {self.X: batch[0], self.na_idx: batch[1]}
if self.additional_data is not None:
feedin[self.X_add] = batch[2]
if excessive:
out, loss, _ = sess.run([self.output_op, self.joint_loss, self.train_step],
feed_dict= feedin)
print("Current cost:", loss)
print(out)
time.sleep(5)
else:
loss, _ = sess.run([self.joint_loss, self.train_step],
feed_dict= feedin)
count +=1
if not np.isnan(loss):
run_loss += loss
if verbose:
if epoch % verbosity_ival == 0:
print('Epoch:', epoch, ", loss:", str(run_loss/count))
if epoch % report_ival == 0:
"""
For each report interval, generate report_samples worth of imputations
and measure both individual and aggregate error values
"""
#Initialise losses
single_rmse = 0
single_sacc = 0
single_bacc = 0
first = True
if cont_kdes:
plot_first = True
for sample in range(report_samples):
minibatch_list = []
for batch in self._batch_iter_output(feed_data, self.train_batch):
if self.additional_data is not None:
feedin = {self.X: batch[0], self.X_add: batch[1]}
else:
feedin = {self.X: batch}
y_batch = pd.DataFrame(sess.run(self.output_op,
feed_dict= feedin),
columns= self.imputation_target.columns)
minibatch_list.append(y_batch)
y_out = pd.DataFrame(pd.concat(minibatch_list, ignore_index= True),
columns= self.imputation_target.columns)
if cont_kdes:
if 'rmse' in self.output_types:
for n in range(self.size_index[0]):
plt.figure(n+1)
t_t = self.imputation_target.iloc[:,n]
t_p = y_out.iloc[:,n]
t_s = spike[:,n]
if plot_first:
t_p[t_s].plot(kind= 'density', color= 'k', alpha= 0.5, label='Single imputation')
else:
t_p[t_s].plot(kind= 'density', color= 'k', alpha= 0.5, label='_nolegend_')
plot_first = False
#Calculate individual imputation losses
for n in range(len(self.size_index)):
temp_pred = y_out.iloc[:,break_list[n]:break_list[n+1]]
temp_true = self.imputation_target.iloc[:,break_list[n]:break_list[n+1]]
temp_spike = spike[:,break_list[n]:break_list[n+1]]
if self.output_types[n] == 'sacc':
temp_spike = temp_spike[:,0]
single_sacc += (1 - sacc(temp_true.values,
temp_pred.values, temp_spike)) / n_softmax
elif self.output_types[n] == 'rmse':
single_rmse += np.sqrt(mse(temp_true[temp_spike],
temp_pred[temp_spike]))
else:
single_bacc += 1 - bacc(temp_true.values, temp_pred.values, temp_spike)
if first:
running_output = y_out
first= False
else:
running_output += y_out
single_rmse = single_rmse / report_samples
single_sacc = single_sacc / report_samples
single_bacc = single_bacc / report_samples
y_out = running_output / report_samples
#Calculate aggregate imputation losses
agg_rmse = 0
agg_sacc = 0
agg_bacc = 0
for n in range(len(self.size_index)):
temp_pred = y_out.iloc[:,break_list[n]:break_list[n+1]]
temp_true = self.imputation_target.iloc[:,break_list[n]:break_list[n+1]]
temp_spike = spike[:,break_list[n]:break_list[n+1]]
if self.output_types[n] == 'sacc':
temp_spike = temp_spike[:,0]
if plot_vars:
temp_pred[temp_spike].mean().plot(kind= 'bar',
label= 'Imputed values (mean)', color ='C0')
temp_true[temp_spike].mean().plot(kind= 'bar', alpha= 0.5,
color= 'r', align= 'edge', label= 'Removed observed values (mean)')
temp_true_name = findcatname(temp_true[temp_spike].columns)[:-1]
plt.title('Overimputation density plot: '+temp_true_name+' (categorical)')
plt.xlabel(temp_true_name)
plt.ylabel('Proportion')
plt.legend()
plt.show()
agg_sacc += (1 - sacc(temp_true.values, temp_pred.values,
temp_spike)) / n_softmax
elif self.output_types[n] == 'rmse':
if plot_vars:
for n_rmse in range(len(temp_pred.columns)):
plt.figure(n_rmse+1)
t_p = temp_pred.iloc[:,n_rmse]
t_t = temp_true.iloc[:,n_rmse]
t_s = temp_spike[:,n_rmse]
t_p[t_s].plot(kind= 'density', label= 'Imputed values (mean)')
t_t[t_s].plot(kind= 'density', color= 'r', label= 'Removed observed values')
t_t.plot(kind='kde', color= 'g', label= 'All observed values')
hyp_output = pd.concat([t_t[np.invert(t_s)], t_p[t_s]])
hyp_output.plot(kind='kde', color= 'm', label = 'Completed data')
plt.title('Overimputation density plot: ' + \
temp_pred.columns[n_rmse] + ' (continuous)')
plt.xlabel(temp_pred.columns[n_rmse])
plt.ylabel('Density')
plt.legend()
plt.show()
agg_rmse += np.sqrt(mse(temp_true[temp_spike],
temp_pred[temp_spike]))
else:
if plot_vars:
temp_pred[temp_spike].mean().plot(kind= 'bar',
label= 'Imputed values',
color= 'C0')
temp_true[temp_spike].mean().plot(kind= 'bar', alpha= 0.5,
color= 'r', align= 'edge', label= 'Observed values')
plt.title('Overimputation binary proportions')
plt.xlabel('Variables')
plt.ylabel('Proportion')
plt.legend()
plt.show()
agg_bacc += 1 - bacc(temp_true.values, temp_pred.values, temp_spike)
#Plot losses depending on which loss values present in data
if rmse_in:
s_rmse.append(single_rmse)
a_rmse.append(agg_rmse)
print("Individual RMSE on spike-in:", single_rmse)
print("Aggregated RMSE on spike-in:", agg_rmse)
if sacc_in:
s_sacc.append(single_sacc)
a_sacc.append(agg_sacc)
print("Individual error on softmax spike-in:", single_sacc)
print("Aggregated error on softmax spike-in:", agg_sacc)
if bacc_in:
s_bacc.append(single_bacc)
a_bacc.append(agg_bacc)
print("Individual error on binary spike-in:", single_bacc)
print("Aggregated error on binary spike-in:", agg_bacc)
if plot_main or ((training_epochs - epoch) < report_ival):
if rmse_in:
plt.plot(s_rmse, 'k-', label= "Individual RMSE")
plt.plot(a_rmse, 'k--', label= "Aggregated RMSE")
min_sr = min(s_rmse)
min_ar = min(a_rmse)
plt.plot([min_sr]*len(s_rmse), 'r:')
plt.plot([min_ar]*len(a_rmse), 'r:')
plt.plot(s_rmse.index(min(s_rmse)),
min_sr, 'rx')
plt.plot(a_rmse.index(min(a_rmse)),
min_ar, 'rx')
if sacc_in:
plt.plot(s_sacc, 'g-', label= "Individual classification error")
plt.plot(a_sacc, 'g--', label= "Aggregated classification error")
min_ss = min(s_sacc)
min_as = min(a_sacc)
plt.plot([min_ss]*len(s_sacc), 'r:')
plt.plot([min_as]*len(a_sacc), 'r:')
plt.plot(s_sacc.index(min(s_sacc)),
min_ss, 'rx')
plt.plot(a_sacc.index(min(a_sacc)),
min_as, 'rx')
if bacc_in:
plt.plot(s_bacc, 'b-', label= "Individual binary error")
plt.plot(a_bacc, 'b--', label= "Aggregated binary error")
min_sb = min(s_bacc)
min_ab = min(a_bacc)
plt.plot([min_sb]*len(s_bacc), 'r:')
plt.plot([min_ab]*len(a_bacc), 'r:')
plt.plot(s_bacc.index(min(s_bacc)),
min_sb, 'rx')
plt.plot(a_bacc.index(min(a_bacc)),
min_ab, 'rx')
#Complete plots
if not skip_plot:
plt.title("Overimputation error during training")
plt.ylabel("Error")
plt.legend()
plt.ylim(ymin= 0)
plt.xlabel("Reporting interval")
plt.show()
print("Overimputation complete. Adjust complexity as needed.")
return self
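# --- Usage sketch (illustrative only) ---
# A typical complexity-tuning pass; the argument values are arbitrary examples.
#
# imputer.overimpute(spikein=0.1, training_epochs=100, report_ival=10,
# plot_vars=False, plot_main=False)
# # Inspect the printed error trajectories, note the epoch where they bottom
# # out, then cap final training at (roughly) that point:
# imputer.train_model(training_epochs=30)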
def build_model_pipeline(self,
data_sample,
binary_columns= None,
softmax_columns= None,
unsorted= True,
additional_data_sample= None,
verbose= True,
crossentropy_adj= 1,
loss_scale = 1):
"""
This function is for integration with databasing or any dataset that needs
to be batched into memory. The data sample is simply there to allow the
original constructor to be recycled. The head of the data should be sufficient
to build the imputation model. The input pipeline itself should pre-scale
the data, and code null values as type np.nan. The pipeline ought to output
a Pandas DataFrame. If additional data will be passed in, then the return must
be a list of two DataFrames. The columns of the dataframe will be re-arranged
so that error functions are efficiently generated.
IT IS IMPERATIVE that this ordering is respected. Design the input batching
function accordingly.
The categorical columns should be a list of column names. Softmax columns
should be a list of lists of column names. This will allow the model to
dynamically assign cost functions to the correct variables. If, however,
the data comes pre-sorted, unsorted can be set to False, in which case
the arguments can be passed in as integers of size, i.e. shape[1] attributes
for each of the relevant categories.
In other words, pre-sort your data and pass in the integers, so indexing
dynamically doesn't become too difficult. Alternatively, list(df.columns.values)
will output a list of column names, which can be easily implemented in the
'for' loop which constructs your dummy variables.
"""
self.input_is_pipeline = True
b_c = binary_columns
s_c = softmax_columns
us = unsorted
a_d = additional_data_sample
vb = verbose
cea = crossentropy_adj
l_s = loss_scale
self.build_model(data_sample, b_c, s_c, us, a_d, vb)
return self
def train_model_pipeline(self,
input_pipeline,
training_epochs= 100,
verbose= True,
verbosity_ival= 1,
excessive= False):
"""
This is the alternative method for optimising the model's parameters when input
data must be batched into memory. Must be called before imputation can be
performed. The model will then be saved to the specified directory
Args:
input_pipeline: Function which yields a pre-processed and scaled DataFrame
from the designated source, be it a server or large flat file.
training_epochs: Integer. The number of epochs the model will run for
verbose: Boolean. Prints out messages, including loss
verbosity_ival: Integer. This number determines the interval between
messages.
excessive: Boolean. Used for troubleshooting, this argument will cause the
cost of each batch to be printed to the terminal.
Returns:
Self. Model is automatically saved upon reaching specified number of epochs
"""
self.input_pipeline = input_pipeline
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if not self.input_is_pipeline:
raise AttributeError("Model was constructed to accept locally-stored data,"\
"either use 'train_model' method or rebuild model "\
"with the 'build_model_pipeline' method.")
if self.seed is not None:
np.random.seed(self.seed)
with tf.Session(graph= self.graph) as sess:
sess.run(self.init)
if verbose:
print("Model initialised")
print()
for epoch in range(training_epochs):
count = 0
run_loss = 0
for feed_data in input_pipeline:
if self.additional_data is None:
if not isinstance(feed_data, pd.DataFrame):
raise TypeError("Input data must be in a DataFrame")
na_loc = feed_data.notnull().astype(bool).values
feedin = {self.X: feed_data.fillna(0).values,
self.na_idx: na_loc}
else:
if not isinstance(feed_data, list):
raise TypeError("Input should be a list of two DataFrames, with "\
"index 0 containing the target imputation data, and"\
" the data at index 1 containing additional data")
if len(feed_data) != 2:
raise TypeError("Input should be a list of two DataFrames, with "\
"index 0 containing the target imputation data, and"\
" the data at index 1 containing additional data")
if not isinstance(feed_data[0], pd.DataFrame):
raise TypeError("Input data must be in a DataFrame")
if not isinstance(feed_data[1], pd.DataFrame):
raise TypeError("Additional data must be in a DataFrame")
na_loc = feed_data[0].notnull().astype(bool).values
feedin = {self.X: feed_data[0].fillna(0).values,
self.X_add: feed_data[1].fillna(0).values,
self.na_idx: na_loc}
if np.sum(na_loc) == 0:
continue
loss, _ = sess.run([self.joint_loss, self.train_step],
feed_dict= feedin)
if excessive:
print("Current cost:", loss)
count +=1
if not np.isnan(loss):
run_loss += loss
if verbose:
if epoch % verbosity_ival == 0:
print('Epoch:', epoch, ", loss:", str(run_loss/count))
if verbose:
print("Training complete. Saving file...")
save_path = self.saver.save(sess, self.savepath)
if verbose:
print("Model saved in file: %s" % save_path)
return self
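# --- Usage sketch (illustrative only) ---
# One way to satisfy the input_pipeline contract described above. Because the
# training loop re-iterates the pipeline once per epoch, a restartable
# iterable object (rather than a one-shot generator) is assumed here.
#
# class CSVPipeline:
# def __init__(self, path, chunksize=1024):
# self.path, self.chunksize = path, chunksize
# def __iter__(self):
# # chunks must be pre-scaled, with nulls encoded as np.nan
# for chunk in pd.read_csv(self.path, chunksize=self.chunksize):
# yield chunk
#
# imputer.train_model_pipeline(CSVPipeline("big_data.csv"), training_epochs=100)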
def yield_samples_pipeline(self,
verbose= False):
"""
As it is impossible to know the specifics of the pipeline, this method simply
cycles through all data provided by the input function. The number of imputations
can be specified by the user, depending on their needs.
Args:
verbose: Prints out messages
Yields:
A 'DataFrame' of the size specified by the input function passed to the
'train_model_pipeline' method.
Returns:
Self
"""
if not self.model_built:
raise AttributeError("The computation graph must be built before the model"\
" can be trained")
if not self.input_is_pipeline:
raise AttributeError("Model was constructed to accept locally-stored data,"\
"either use 'train_model' method or rebuild model "\
"with the 'build_model_pipeline' method.")
if self.seed is not None:
np.random.seed(self.seed)
with tf.Session(graph= self.graph) as sess:
self.saver.restore(sess, self.savepath)
if verbose:
print("Model restored.")
for feed_data in self.input_pipeline:
if self.additional_data is None:
if not isinstance(feed_data, pd.DataFrame):
raise TypeError("Input data must be in a DataFrame")
na_loc = feed_data.notnull().astype(bool).values
feedin = {self.X: feed_data.fillna(0).values}
else:
if not isinstance(feed_data, list):
raise TypeError("Input should be a list of two DataFrames, with "\
"index 0 containing the target imputation data, and"\
" the data at index 1 containing additional data")
if len(feed_data) != 2:
raise TypeError("Input should be a list of two DataFrames, with "\
"index 0 containing the target imputation data, and"\
" the data at index 1 containing additional data")
if not isinstance(feed_data[0], pd.DataFrame):
raise TypeError("Input data must be in a DataFrame")
if not isinstance(feed_data[1], pd.DataFrame):
raise TypeError("Additional data must be in a DataFrame")
na_loc = feed_data[0].notnull().astype(bool).values
feedin = {self.X: feed_data[0].fillna(0).values,
self.X_add: feed_data[1].fillna(0).values}
feed_data = feed_data[0]
na_loc = feed_data.notnull().astype(bool).values
y_out = pd.DataFrame(sess.run(self.output_op,feed_dict= feedin),
columns= self.imputation_target.columns)
output_df = self.imputation_target.copy()
output_df[np.invert(na_loc)] = y_out[np.invert(na_loc)]
yield output_df #Hand each completed dataset back to the caller
import numpy as np
from typing import Union, List
def continuous_to_int(continuous: float, lower_bound: int, upper_bound: int) -> int:
"""
Convert the continuous variable to its corresponding integer value
"""
val = int(np.floor((upper_bound - lower_bound + 1) * continuous))
# Clamp so that continuous == 1.0 still maps inside [lower_bound, upper_bound]
return min(lower_bound + val, upper_bound)
#!/usr/bin/python
import os, sys
import json
import numpy as np
import re
### Name: <NAME>
### ID: 16339936
### github link:
### https://github.com/EoghanOGallchoir/ARC
def solve_4258a5f9(x):
'''
Task description:
Light blue squares must be surrounded by purple squares. (Unsure of exact colours, bit colour blind)
so a grid of: 0 0 0 0 0 must become: 0 1 1 1 0
0 0 5 0 0 0 1 5 1 0
0 0 0 0 0 0 1 1 1 0
Function:
Solve will work by identifying blue squares, which is 5 in the grid.
Then, the function will put purple squares, 1, around the blue (5) square.
'''
blue = 5
purple = 1
#iterating through each row+column to find blues
for row in range(0, x.shape[0]):
for col in range(0, x.shape[1]):
# changing the surrounding black squares to blue
if x[row][col] == blue:
for change in range(-1,2):
x[row-1][col+change] = purple
x[row+1][col+change] = purple
# easiest way i could think of keeping the blue square blue while changing left and right of it
if x[row][col+change] != blue:
x[row][col+change] = purple
return x
def solve_46442a0e(x):
'''
Task Description:
This takes the entire grid as an input and rotates it around 3 times, if that makes sense.
So a 2x2 grid of: 0 1 becomes a 4x4 grid of: 0 1 1 0
1 0 1 0 0 1
1 0 0 1
0 1 1 0
As you can see the original matrix in the top left gets rotated around.
Function:
Get the grid, x, and rotate its orientation 90 deg to get x1.
get x1 and rotate that 90 deg to get x2, then x2 rotate to get x3.
Finally put all 4 grids together in the form: x x1
x3 x2
'''
# using list comprehension to revere grids x, x1, x2 using reversed() python function
x1 = [list(reversed(colour)) for colour in zip(*x)]
x2 = [list(reversed(colour)) for colour in zip(*x1)]
x3 = [list(reversed(colour)) for colour in zip(*x2)]
# combining the the lists along the horizontal axis, two halves
arr1 = np.concatenate((x, x1), axis=1)
arr2 = np.concatenate((x3,x2),axis=1)
# combining both into one grid
x = np.concatenate((arr1, arr2))
return x
import pickle
import sys
import logging
import time
import random
import numpy as np
from nn.utils import timer
from nn.cnn.lenet5_layers import (
Convolution2D,
MaxPooling2D,
FullyConnected,
Flatten,
ReLu,
Softmax
)
logger = logging.getLogger(__name__)
class LeNet5:
'''
LeNet-5:
input: 28x28
conv1: (5x5x6)@s1p2 -> 28x28x6 {(28-5+2x2)/1+1}
maxpool2: (2x2)@s2 -> 14x14x6 {(28-2)/2+1}
conv3: (5x5x16)@s1p0 -> 10x10x16 {(14-5)/1+1}
maxpool4: (2x2)@s2 -> 5x5x16 {(10-2)/2+1}
conv5: (5x5x120)@s1p0 -> 1x1x120 {(5-5)/1+1}
fc6: 120 -> 84
fc7: 84 -> 10
softmax: 10 -> 10
'''
def __init__(self, lr=0.01, smc=None):
self.lr = lr # 0.01
self.smc = smc
self.layers = []
self.layers.append(
Convolution2D(inputs_channel=1, num_filters=6, kernel_size=5, padding=2, stride=1, learning_rate=self.lr,
name='conv1', smc=smc))
self.layers.append(ReLu())
self.layers.append(MaxPooling2D(pool_size=2, stride=2, name='maxpool2'))
self.layers.append(
Convolution2D(inputs_channel=6, num_filters=16, kernel_size=5, padding=0, stride=1, learning_rate=self.lr,
name='conv3'))
self.layers.append(ReLu())
self.layers.append(MaxPooling2D(pool_size=2, stride=2, name='maxpool4'))
self.layers.append(
Convolution2D(inputs_channel=16, num_filters=120, kernel_size=5, padding=0, stride=1, learning_rate=self.lr,
name='conv5'))
self.layers.append(ReLu())
self.layers.append(Flatten())
self.layers.append(FullyConnected(num_inputs=120, num_outputs=84, learning_rate=lr, name='fc6'))
self.layers.append(ReLu())
self.layers.append(FullyConnected(num_inputs=84, num_outputs=10, learning_rate=lr, name='fc7'))
self.layers.append(Softmax())
self.lay_num = len(self.layers)
def cross_entropy(self, inputs, labels):
out_num = labels.shape[0]
p = np.sum(labels.reshape(1, out_num) * inputs)
loss = -np.log(p)
return loss
def train(self, training_data, training_label, batch_size, epoch, weights_file):
total_acc = 0
for e in range(epoch):
for batch_index in range(0, training_data.shape[0], batch_size):
# batch input
if batch_index + batch_size < training_data.shape[0]:
data = training_data[batch_index:batch_index + batch_size]
label = training_label[batch_index:batch_index + batch_size]
else:
data = training_data[batch_index:training_data.shape[0]]
label = training_label[batch_index:training_label.shape[0]]
loss = 0
acc = 0
start_time = time.perf_counter()
for b in range(batch_size):
x = data[b]
y = label[b]
# forward pass
for l in range(self.lay_num):
output = self.layers[l].forward(x)
x = output
loss += self.cross_entropy(output, y)
if np.argmax(output) == np.argmax(y):
acc += 1
total_acc += 1
# backward pass
dy = y
for l in range(self.lay_num - 1, -1, -1):
dout = self.layers[l].backward(dy)
dy = dout
# time
end_time = time.perf_counter()
batch_time = end_time - start_time
remain_time = (training_data.shape[0] * epoch - batch_index - training_data.shape[
0] * e) / batch_size * batch_time
hrs = int(remain_time) / 3600
mins = int((remain_time / 60 - hrs * 60))
secs = int(remain_time - mins * 60 - hrs * 3600)
# result
loss /= batch_size
batch_acc = float(acc) / float(batch_size)
training_acc = float(total_acc) / float((batch_index + batch_size) * (e + 1))
print('== Epoch: {0:d}/{1:d} - Iter:{2:d} - Loss: {3:.2f} - BAcc: {4:.2f} - TAcc: {5:.2f} - Remain: {6:d} Hrs {7:d} Mins {8:d} Secs =='.format(
e, epoch, batch_index + batch_size, loss, batch_acc, training_acc, int(hrs), int(mins),
int(secs)))
logger.info('== Epoch: {0:d}/{1:d} - Iter:{2:d} - Loss: {3:.2f} - BAcc: {4:.2f} - TAcc: {5:.2f} - Remain: {6:d} Hrs {7:d} Mins {8:d} Secs =='.format(
e, epoch, batch_index + batch_size, loss, batch_acc, training_acc, int(hrs), int(mins),
int(secs)))
# dump weights and bias
obj = []
for i in range(self.lay_num):
cache = self.layers[i].extract()
obj.append(cache)
with open(weights_file, 'wb') as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def test(self, data, label, test_size):
toolbar_width = 40
sys.stdout.write("[%s]" % (" " * (toolbar_width - 1)))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width))
step = float(test_size) / float(toolbar_width)
st = 1
total_acc = 0
for i in range(test_size):
if i == round(step):
step += float(test_size) / float(toolbar_width)
st += 1
sys.stdout.write(".")
# sys.stdout.write("%s]a"%(" "*(toolbar_width-st)))
# sys.stdout.write("\b" * (toolbar_width-st+2))
sys.stdout.flush()
x = data[i]
y = label[i]
for l in range(self.lay_num):
output = self.layers[l].forward(x)
x = output
if np.argmax(output) == np.argmax(y):
total_acc += 1
sys.stdout.write("\n")
print('== Test Size:{0:d} - Test Acc:{1:.2f} =='.format(test_size, float(total_acc) / float(test_size)))
logger.info('== Test Size:{0:d} - Test Acc:{1:.2f} =='.format(test_size, float(total_acc) / float(test_size)))
def test_with_pretrained_weights(self, data, label, test_size, weights_file):
with open(weights_file, 'rb') as handle:
b = pickle.load(handle)
self.layers[0].feed(b[0]['conv1.weights'], b[0]['conv1.bias'])
self.layers[3].feed(b[3]['conv3.weights'], b[3]['conv3.bias'])
self.layers[6].feed(b[6]['conv5.weights'], b[6]['conv5.bias'])
self.layers[9].feed(b[9]['fc6.weights'], b[9]['fc6.bias'])
self.layers[11].feed(b[11]['fc7.weights'], b[11]['fc7.bias'])
toolbar_width = 40
sys.stdout.write("[%s]" % (" " * (toolbar_width - 1)))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width))
step = float(test_size) / float(toolbar_width)
st = 1
total_acc = 0
for i in range(test_size):
if i == round(step):
step += float(test_size) / float(toolbar_width)
st += 1
sys.stdout.write(".")
# sys.stdout.write("%s]a"%(" "*(toolbar_width-st)))
# sys.stdout.write("\b" * (toolbar_width-st+2))
sys.stdout.flush()
x = data[i]
y = label[i]
for l in range(self.lay_num):
output = self.layers[l].forward(x)
x = output
if np.argmax(output) == np.argmax(y):
total_acc += 1
sys.stdout.write("\n")
print('== Test Size:{0:d} - Test Acc:{1:.2f} =='.format(test_size, float(total_acc) / float(test_size)))
logger.info('== Test Size:{0:d} - Test Acc:{1:.2f} =='.format(test_size, float(total_acc) / float(test_size)))
def predict_with_pretrained_weights(self, inputs, weights_file):
with open(weights_file, 'rb') as handle:
b = pickle.load(handle)
self.layers[0].feed(b[0]['conv1.weights'], b[0]['conv1.bias'])
self.layers[3].feed(b[3]['conv3.weights'], b[3]['conv3.bias'])
self.layers[6].feed(b[6]['conv5.weights'], b[6]['conv5.bias'])
self.layers[9].feed(b[9]['fc6.weights'], b[9]['fc6.bias'])
self.layers[11].feed(b[11]['fc7.weights'], b[11]['fc7.bias'])
for l in range(self.lay_num):
output = self.layers[l].forward(inputs)
inputs = output
digit = np.argmax(output)
probability = output[0, digit]
return digit, probability
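# --- Usage sketch (illustrative only) ---
# Single-image inference with saved weights. The (channels, height, width)
# input shape and the weights file name are assumptions for illustration.
#
# net = LeNet5(lr=0.01)
# img = np.random.rand(1, 28, 28) # stand-in for a normalised MNIST digit
# digit, prob = net.predict_with_pretrained_weights(img, "lenet5_weights.pkl")
# print("predicted {} (p={:.2f})".format(digit, prob))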
def _eval(self, data, label, test_size):
total_acc = 0
for i in range(test_size):
x = data[i]
y = label[i]
for l in range(self.lay_num):
if l == 0:
output = self.layers[l].forward(x, testing_flag=True)
else:
output = self.layers[l].forward(x)
x = output
if np.argmax(output) == np.argmax(y):
total_acc += 1
return float(total_acc) / float(test_size)
import warnings
import numpy as np
from simtk import unit
from tqdm import tqdm
from benchmark import simulation_parameters
from benchmark.integrators import LangevinSplittingIntegrator
from benchmark.testsystems import NonequilibriumSimulator
from benchmark.testsystems import water_cluster_rigid, alanine_constrained
from benchmark.testsystems.bookkeepers import get_state_as_mdtraj
from multiprocessing import Pool
n_processes = 32
# experiment variables
testsystems = {
"alanine_constrained": alanine_constrained,
"water_cluster_rigid": water_cluster_rigid,
}
splittings = {"OVRVO": "O V R V O",
"ORVRO": "O R V R O",
"RVOVR": "R V O V R",
"VRORV": "V R O R V",
}
marginals = ["configuration", "full"]
dt_range = np.array([0.1] + list(np.arange(0.5, 8.001, 0.5))) * unit.femtosecond
# constant parameters
collision_rate = 1.0 / unit.picoseconds
temperature = simulation_parameters['temperature']
def n_steps_(dt, n_collisions=1, max_steps=1000):
"""Heuristic for how many steps are needed to reach steady state:
should run at least long enough to have n_collisions full "collisions"
with the bath.
This corresponds to more discrete steps when dt is small, and fewer discrete steps
when dt is large.
Examples:
n_steps_(dt=1fs) = 1000
n_steps_(dt=2fs) = 500
n_steps_(dt=4fs) = 250
n_steps_(dt=8fs) = 125
"""
return min(max_steps, int((n_collisions / collision_rate) / dt))
# adaptive inner-loop params
inner_loop_initial_size = 50
inner_loop_batch_size = 1
inner_loop_stdev_threshold = 0.01
inner_loop_max_samples = 50000
# adaptive outer-loop params
outer_loop_initial_size = 50
outer_loop_batch_size = 100
outer_loop_stdev_threshold = inner_loop_stdev_threshold
outer_loop_max_samples = 1000
def stdev_log_rho_pi(w):
"""Approximate the standard deviation of the estimate of log < e^{-w} >_{x; \Lambda}
Parameters
----------
w : unitless (kT) numpy array of work samples
Returns
-------
stdev : float
Notes
-----
This will be an underestimate esp. when len(w) is small or stdev_log_rho_pi is large.
"""
assert(type(w) != unit.Quantity) # assert w is unitless
assert(type(w) == np.ndarray) # assert w is a numpy array
# use leading term in taylor expansion: anecdotally, looks like it's in good agreement with
# bootstrapped uncertainty estimates up to ~0.5-0.75, then becomes an increasingly bad underestimate
return np.std(np.exp(-w)) / (np.mean(np.exp(-w)) * np.sqrt(len(w)))
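# Where the leading term comes from (delta method): for m = mean(exp(-w)),
# Var(log m) ~= Var(m) / m**2 and Var(m) = Var(exp(-w)) / n, hence
# stdev(log m) ~= std(exp(-w)) / (mean(exp(-w)) * sqrt(n)), the expression
# returned above.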
def stdev_kl_div(outer_samples):
"""Approximate the stdev of the estimate of E_rho log_rho_pi"""
# TODO: Propagate uncertainty from the estimates of log_rho_pi
# currently, just use standard error of mean of log_rho_pi_s
log_rho_pi_s = np.array([np.log(np.mean(np.exp(-sample['Ws']))) for sample in outer_samples])
return np.std(log_rho_pi_s) / np.sqrt(len(log_rho_pi_s))
from astropy import units as u
from json_to_dict import constants
import numpy as np
pi = np.pi
######################################################################################
def get_K(Ms, Mp, P, i, e=0*u.one, G=constants["G"]):
top = (2*pi*G)**(1/3) * Mp * np.sin(i)
bottom = P**(1/3) * (Ms + Mp)**(2/3) * np.sqrt(1 - e**2)
return top / bottom
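# --- Usage sketch (illustrative only) ---
# Assuming constants["G"] carries astropy units, a Jupiter analogue on a
# circular, edge-on orbit should give a semi-amplitude of roughly 12.5 m/s:
#
# K = get_K(1 * u.M_sun, 1 * u.M_jup, 11.86 * u.yr, 90 * u.deg)
# print(K.to(u.m / u.s))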
"""
#The below is edited based on work from <NAME> (date created 2019-04-16-12-28, <EMAIL>) https://github.com/raspstephan/Lorenz-Online/blob/master/L96.py #
#https://hub.gke2.mybinder.org/user/raspstephan-l96-visualization-vn2xzwv1/notebooks/L96-visualization.ipynb#
"""
import sys
def in_notebook():
"""
Returns ``True`` if the module is running in IPython kernel,
``False`` if in IPython shell or other Python shell.
"""
return 'ipykernel' in sys.modules
import numpy as np
import xarray as xr
if in_notebook():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
class L96TwoLevel_updated(object):
def __init__(self, K=36, J=10, h=1, F=10, c=10, b=10, dt=0.001,
X_init=None, Y_init=None, noprog=False, noYhist=False, save_dt=0.1,
integration_type='uncoupled', parameterization=None):
# Model parameters
self.K, self.J, self.h, self.F, self.c, self.b, self.dt = K, J, h, F, c, b, dt
self.noprog, self.noYhist, self.integration_type = noprog, noYhist, integration_type
self.step_count = 0
self.save_dt = save_dt
self.parameterization = parameterization
if self.parameterization is not None: self.integration_type = 'parameterization'
self.save_steps = int(save_dt / dt)
self.X = np.random.rand(self.K) if X_init is None else X_init.copy()
self.Y = np.zeros(self.K * self.J) if Y_init is None else Y_init.copy()
self._history_X = [self.X.copy()]
self._history_Y_mean = [self.Y.reshape(self.K, self.J).mean(1).copy()]
self._history_Y2_mean = [(self.Y.reshape(self.K, self.J)**2).mean(1).copy()]
self._history_B = [-self.h * self.c * self.Y.reshape(self.K, self.J).mean(1)]
if not self.noYhist:
self._history_Y = [self.Y.copy()]
def _rhs_X_dt(self, X, Y=None, B=None):
"""Compute the right hand side of the X-ODE."""
if Y is None:
dXdt = (
-np.roll(X, 1) * (np.roll(X, 2) - np.roll(X, -1)) -
X + self.F + B
)
else:
dXdt = (
-np.roll(X, 1) * (np.roll(X, 2) - np.roll(X, -1)) -
X + self.F - (self.h /self.b) * self.c * Y.reshape(self.K, self.J).sum(1)
)
return self.dt * dXdt
def _rhs_Y_dt(self, X, Y):
"""Compute the right hand side of the Y-ODE."""
dYdt = (
-self.b * np.roll(Y, -1) * (np.roll(Y, -2) - np.roll(Y, 1)) -
Y + self.h / self.b * np.repeat(X, self.J)
) * self.c
return self.dt * dYdt
def _rhs_dt(self, X, Y):
return self._rhs_X_dt(X, Y=Y), self._rhs_Y_dt(X, Y)
def step(self, add_B=True, B=None):
"""Integrate one time step"""
if self.parameterization is None:
B = -(self.h / self.b) * self.c * self.Y.reshape(self.K, self.J).sum(1) if B is None else B
if self.integration_type == 'coupled':
k1_X, k1_Y = self._rhs_dt(self.X, self.Y)
k2_X, k2_Y = self._rhs_dt(self.X + k1_X / 2, self.Y + k1_Y / 2)
k3_X, k3_Y = self._rhs_dt(self.X + k2_X / 2, self.Y + k2_Y / 2)
k4_X, k4_Y = self._rhs_dt(self.X + k3_X, self.Y + k3_Y)
elif self.integration_type == 'uncoupled':
k1_X = self._rhs_X_dt(self.X, B=B)
k2_X = self._rhs_X_dt(self.X + k1_X / 2, B=B)
k3_X = self._rhs_X_dt(self.X + k2_X / 2, B=B)
k4_X = self._rhs_X_dt(self.X + k3_X, B=B)
# Then update Y with unupdated X
k1_Y = self._rhs_Y_dt(self.X, self.Y)
k2_Y = self._rhs_Y_dt(self.X, self.Y + k1_Y / 2)
k3_Y = self._rhs_Y_dt(self.X, self.Y + k2_Y / 2)
k4_Y = self._rhs_Y_dt(self.X, self.Y + k3_Y)
self.X += 1 / 6 * (k1_X + 2 * k2_X + 2 * k3_X + k4_X)
self.Y += 1 / 6 * (k1_Y + 2 * k2_Y + 2 * k3_Y + k4_Y)
else: # Parameterization case
k1_X = self._rhs_X_dt(self.X, B=0)
k2_X = self._rhs_X_dt(self.X + k1_X / 2, B=0)
k3_X = self._rhs_X_dt(self.X + k2_X / 2, B=0)
k4_X = self._rhs_X_dt(self.X + k3_X, B=0)
B = self.parameterization(self.X) if B is None else B
self.X += 1 / 6 * (k1_X + 2 * k2_X + 2 * k3_X + k4_X)
if add_B: self.X += B * self.dt
self.step_count += 1
if self.step_count % self.save_steps == 0:
Y_mean = self.Y.reshape(self.K, self.J).mean(1)
Y2_mean = (self.Y.reshape(self.K, self.J)**2).mean(1)
self._history_X.append(self.X.copy())
self._history_Y_mean.append(Y_mean.copy())
self._history_Y2_mean.append(Y2_mean.copy())
self._history_B.append(B.copy())
if not self.noYhist:
self._history_Y.append(self.Y.copy())
def iterate(self, time):
steps = int(time / self.dt)
for n in tqdm(range(steps), disable=self.noprog):
self.step()
@property
def state(self):
return np.concatenate([self.X, self.Y])
def set_state(self, x):
self.X = x[:self.K]
self.Y = x[self.K:]
@property
def parameters(self):
return np.array([self.F, self.h, self.c, self.b])
def erase_history(self):
self._history_X = []
self._history_Y_mean = []
self._history_Y2_mean = []
self._history_B = []
if not self.noYhist:
self._history_Y = []
@property
def history(self):
dic = {}
dic['X'] = xr.DataArray(self._history_X, dims=['time', 'x'], name='X')
dic['B'] = xr.DataArray(self._history_B, dims=['time', 'x'], name='B')
dic['Y_mean'] = xr.DataArray(self._history_Y_mean, dims=['time', 'x'], name='Y_mean')
dic['Y2_mean'] = xr.DataArray(self._history_Y2_mean, dims=['time', 'x'], name='Y2_mean')
if not self.noYhist:
            dic['X_repeat'] = xr.DataArray(np.repeat(self._history_X, self.J, 1),
                                           dims=['time', 'y'], name='X_repeat')
            dic['Y'] = xr.DataArray(self._history_Y, dims=['time', 'y'], name='Y')
        return xr.Dataset(dic)
##
# \brief Gumbel copula.
from __future__ import print_function, absolute_import, division
import numpy as np
from starvine.bvcopula.copula.copula_base import CopulaBase
class GumbelCopula(CopulaBase):
"""!
@brief Gumbel copula
single paramter model
\f$\theta \in [1, \infty) \f$
"""
def __init__(self, rotation=0, init_params=None):
super(GumbelCopula, self).__init__(rotation, params=init_params)
self.thetaBounds = ((1 + 1e-9, np.inf),)
self.theta0 = (2.0, )
self.rotation = rotation
self.name = 'gumbel'
@CopulaBase._rotPDF
def _pdf(self, u, v, rotation=0, *theta):
"""!
@brief Probability density function for gumbel bivariate copula
"""
h1 = theta[0] - 1.0
# h2 = (1.0 - 2.0 ** theta[0]) / theta[0]
h2 = (1.0 - 2.0 * theta[0]) / theta[0]
h3 = 1.0 / theta[0]
UU = np.asarray(u)
VV = np.asarray(v)
h4 = -np.log(UU)
h5 = -np.log(VV)
h6 = np.power(h4, theta[0]) + np.power(h5, theta[0])
h7 = np.power(h6, h3)
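        # In closed form (with w = (-ln u)^theta + (-ln v)^theta, so h6 = w
        # and h7 = w^(1/theta)), the Gumbel density assembled below is
        #   c(u, v) = exp(-w^(1/theta)) / (u v) * ((-ln u)(-ln v))^(theta-1)
        #             * w^(1/theta - 2) * (theta - 1 + w^(1/theta))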
        p = np.exp(-h7 + h4 + h5) * np.power(h4, h1) * np.power(h5, h1) * \
            np.power(h6, h2) * (theta[0] + h7 - 1.0)
        return p
from Instruments.devGlobalFunctions import devGlobal
import numpy as np
import struct
import wx
import math
import pyte16 as pyte
from wx.lib.pubsub import pub
#Simple panel
class graphPanel(wx.Panel):
def __init__(self, parent, device):
wx.Panel.__init__(self,parent)
button_sizer = wx.BoxSizer(wx.VERTICAL)
btnWidth = 90
btnHeight = 40
blankBtnLabel = "&Blank"
self.device = device
#==============================================================================================================================================================
# Buttons are created with labels, sizes, and positions. The names of the buttons are arbitrary but
# it is a good practice to name them according to their function.
##Creating Buttons
self.button = []
rowSizer = wx.BoxSizer(wx.HORIZONTAL)
rowCounter = 0
buttonsInARow = 5
buttonsInACol = 3
for i in range(buttonsInARow*buttonsInACol):
btnIndexCol = i%buttonsInARow + 1
btnIndexRow = int(math.floor(i/buttonsInARow)) + 1
self.button.append(wx.Button(self, label = blankBtnLabel + "\n(" + str(btnIndexRow) + "," + str(btnIndexCol) + ")", size = (btnWidth, btnHeight)))
self.button[i].SetBackgroundColour("RED")
rowSizer.Add(self.button[i], 0, wx.ALL, 1)
if btnIndexCol == buttonsInARow:
button_sizer.Add(wx.StaticLine(self), 0, wx.ALL, 1)
button_sizer.Add(rowSizer, 0, wx.ALL, 0)
rowSizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizerAndFit(button_sizer)
class SuperFastClass(devGlobal):
# Human readable name for the gui, note: Needs to be unique
name = "Super Fast"
def __init__(self, *args):
devGlobal.__init__(self, *args)
self.devComm.write("*CLS; *RST")
self.devComm.write(":INST:SEL 1")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 2")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 3")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 4")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 1")
self.ch1OnOff = 0
self.ch2OnOff = 0
self.ch3OnOff = 0
self.ch4OnOff = 0
self.numTabs = 2
self.additionalPanels = [(graphPanel, 'Graph Tab')]
self.label_list = self.label_list + [
('Gets Model\nOption', self.GetModelOption, ""),
('Segment\nLength', self.SegmentLength, ""),
('Tests', self.Tests, ""),
('Sets Operating\nChannel', self.SetOperatingChannel, ""),
('Standard\nSquare Wave', self.SetStandardSquareWave, ""),
('Sets Operating\nChannel On/Off', self.OutputOnOff, ""),
('Deletes\nAll Traces', self.DeleteAllTraces, ""),
('Queries\nTrace Points', self.TracePoints, ""),
('Ch1 Output\nOn/Off', self.Ch1OnOff, ""), # 1
('Ch2 Output\nOn/Off', self.Ch2OnOff, ""), # 2
('Sync\nCh1 and Ch2', self.SyncChannels, ""), # 3
('Ch1 Trigger\nInt/Ext', self.Ch1TrigIntExt, ""), # 4
('Ch2 Trigger\nInt/Ext', self.Ch2TrigIntExt, ""), # 5
('Ch1 Set\nWait Time', self.Ch1setWaitTime, ""), # 6
('Ch1 Set\nWait Time', self.Ch1setWaitTime, ""), # 6
('Ch1 Set\nWait Time', self.Ch1setWaitTime, ""), # 6
('Ch1 Set\nWait Time', self.Ch1setWaitTime, "") # 6
]
for label, function, *_ in self.label_list:
pub.subscribe(function, self.createTopic(label))
def GetModelOption(self, msg):
cmdString = "Queries memory option on "
self.answer = str(int(self.devComm.query("*OPT?")[2:4])*1e6)
self.printOut(cmdString)
done = 0
while done != 1:
print(self.devComm.query("*OPC?"))
done = int(self.devComm.query("*OPC?"))
def SegmentLength(self, msg):
cmdString = "Gets Segment Length on "
self.answer = self.devComm.query(":TRACE:DEFine?")
self.printOut(cmdString)
def Tests(self, msg):
cmdString = "Arbitrary waveform tests on "
cycle_len = 1024
num_cycles = 1
seg_len = cycle_len * num_cycles
wave1 = self.build_sine_wave(cycle_len,num_cycles, low_level=4000,
high_level=2 ** 14 - 4000)
wave2 = self.build_square_wave(cycle_len, num_cycles)
self.devComm.write(":INST:SEL 1")
self.devComm.write(":TRAC:MODE SING")
seg_nb = 1
self.devComm.write(':TRAC:DEF {0:d},{1:d}'.format(seg_nb, seg_len))
self.devComm.write(':TRAC:SEL {0:d}'.format(seg_nb))
self.send_binary_data(pref=':TRAC:DATA', bin_dat=wave1)
seg_nb = 2
self.devComm.write(':TRAC:DEF {0:d},{1:d}'.format(seg_nb, seg_len))
self.devComm.write(':TRAC:SEL {0:d}'.format(seg_nb))
self.send_binary_data(pref=':TRAC:DATA', bin_dat=wave2)
seg_num = [2, 1, 2, 1]
repeats = [1, 5, 1, 4]
jump = [0, 0, 0, 0]
seq_table = list(zip(repeats, seg_num, jump))
self.devComm.write(':SEQ:SELect 1')
self.download_sequencer_table(seq_table)
self.devComm.write(':SOURce:FUNCtion:MODE SEQ')
self.devComm.write(':SOUR:FREQ:RAST 1.0e9')
self.printOut(cmdString)
yNP = np.concatenate((wave1,wave2))
pub.sendMessage('PrintData', msg=[self.panelId,yNP])
def SetOperatingChannel(self, msg):
param = self.GetParamVector()
cmdString = "Sets the operating channel to " + param[0] + " on "
visaCmd = ":INST:SEL " + param[0]
self.devComm.write(visaCmd)
self.printOut(cmdString)
def SetStandardSquareWave(self, msg):
param = self.GetParamVector()
cmdString = "Sets a standard square wave with frequency " + param[2] + " on the operating channel on "
self.devComm.write(':SOURce:FUNCtion:MODE FIX')
self.devComm.write(":SOURce:FREQuency:CW " + param[2])
self.devComm.write(":SOURce:FUNCtion:SHAPe SQU")
self.devComm.write(":SOURce:SQUare:DCYC 50.0")
self.printOut(cmdString)
def OutputOnOff(self, msg):
cmdString = "Sets active channel output to on/off on "
if self.ch1OnOff == 0:
self.ch1OnOff = 1
self.devComm.write(":OUTPut:STATe ON")
else:
self.ch1OnOff = 0
self.devComm.write(":OUTPut:STATe OFF")
self.printOut(cmdString)
def DeleteAllTraces(self, msg):
cmdString = "Deletes all traces on "
self.devComm.write(":TRACE:DELETE:ALL")
self.printOut(cmdString)
def TracePoints(self, msg):
cmdString = "Queries trace points on "
self.answer = self.devComm.query(":TRAC:POINts?")
self.printOut(cmdString)
def Ch1OnOff(self, msg):
param = self.GetParamVector()
cmdString = "Sets Ch1 Output to " + param[2] + " on "
cmdString = cmdString + self.address + "\n"
        self.printOut(cmdString)
def Ch2OnOff(self, msg):
param = self.GetParamVector()
cmdString = "Sets Ch2 Output to " + param[2] + " on "
cmdString = cmdString + self.address + "\n"
        print(cmdString)
def SyncChannels(self, msg):
StringInit = "Sync Ch1 and Ch2 on "
self.cmdString = StringInit + self.address
print(self.cmdString)
def Ch1TrigIntExt(self, msg):
param = self.GetParamVector()
cmdString = "Sets Ch1 Trigger to " + param[2] + " on "
cmdString = cmdString + self.com_type + self.address + "\n"
        print(cmdString)
def Ch2TrigIntExt(self, event):
param = self.GetParamVector()
StringInit = "Sets Ch2 Trigger to " + param[2] + " on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch1setWaitTime(self, event):
param = self.GetParamVector()
StringInit = "Sets Ch1 Wait Time to " + param[2] + " on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch2setWaitTime(self, event):
param = self.GetParamVector()
StringInit = "Sets Ch2 Wait Time to " + param[2] + " on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch1Ena10MExtRef(self, event):
StringInit = "Enable Ch1 10 MHz External Reference on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch2Ena10MExtRef(self, event):
StringInit = "Enable Ch2 10 MHz External Reference on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def build_sine_wave(self, cycle_len, num_cycles=1, phase_degree=0, low_level=0, high_level=2**14-1):
cycle_len = int(cycle_len)
num_cycles = int(num_cycles)
if cycle_len <= 0 or num_cycles <= 0:
return None
dac_min = 0
dac_max = 2**14-1
wav_len = cycle_len * num_cycles
phase = float(phase_degree) * np.pi / 180.0
x = np.linspace(start=phase, stop=phase+2*np.pi, num=cycle_len, endpoint=False)
zero_val = (low_level + high_level) / 2.0
amplitude = (high_level - low_level) / 2.0
        y = np.sin(x) * amplitude + zero_val
        y = np.round(y)
        y = np.clip(y, dac_min, dac_max)
        return y.astype(np.uint16)
# routes related to the boba run monitor
import os
import time
import pandas as pd
import numpy as np
from flask import jsonify, request
from .util import read_csv, read_json, write_json
from bobaserver import app, socketio, scheduler
from bobaserver.bobastats import sampling, sensitivity
import bobaserver.common as common
class BobaWatcher:
# static attributes
header_outcome = ['n_samples', 'mean', 'lower', 'upper']
def __init__(self, order, weights=None):
self.start_time = None
self.prev_time = 0 # for resume
# sampling order and weights
self.order = [uid - 1 for uid in order] # convert to 0-indexed
self.weights = weights
# results
self.last_merge_index = 0
self.outcomes = []
self.decision_scores = []
@staticmethod
def get_fn_outcome():
return os.path.join(app.bobarun.dir_log, 'outcomes.csv')
@staticmethod
def get_fn_save():
return os.path.join(app.bobarun.dir_log, 'execution_plan.json')
@staticmethod
def get_fn_sensitivity():
return os.path.join(app.bobarun.dir_log, 'sensitivity.csv')
@staticmethod
def get_header_sensitivity():
return ['n_samples', 'type'] + common.get_decision_list()
def _append_csv(self, fn, header, data):
# append to the csv if it exists, or create one
if os.path.exists(fn):
f = open(fn, 'a')
else:
f = open(fn, 'w')
f.write(','.join(header) + '\n')
for r in data:
f.write(','.join([str(i) for i in r]) + '\n')
f.close()
def _impute_null_CI(self, data, previous, col=0):
# impute NaN in CIs, assuming data is a 2D list [..., mean, lower, upper]
# where col is the column index of mean. Modify data in place.
for i, d in enumerate(data):
for j in [col + 1, col + 2]:
                if np.isnan(d[j]):
                    # impute from the previous row; for the first row fall
                    # back to the last row of `previous` (assumed behavior)
                    d[j] = previous[-1][j] if i == 0 else data[i - 1][j]
def show_all_cv_processing_output():
# Start to implement new version of algorithm
import matplotlib.pyplot as plt
import os
import pydicom
import numpy as np
import cv2
import copy
import seaborn as sns
from numpy.random import randn
import matplotlib as mpl
from scipy import stats
def gen_ct_dicom_dict(ct_filelist):
CtCache = {}
CtCache["SOPInstanceUID"] = {} # Query ct data by the key SOPInstanceUID
CtCache["SliceLocation"] = {} # Query ct data by the key SliceLocation
CtCache["filepath"] = {} # Query ct data by the key filepath
for filepath in ct_filelist:
ct_fp = pydicom.read_file(filepath)
ct_SOPInstanceUID = ct_fp.SOPInstanceUID
ct_SliceLocation = ct_fp.SliceLocation
ct_filepath = filepath
ct_dict = {}
ct_dict["SOPInstanceUID"] = ct_SOPInstanceUID
ct_dict["SliceLocation"] = ct_SliceLocation
ct_dict["filepath"] = ct_filepath
# Additional appending data but not key for query
ct_dict["ImagePositionPatient_x"] = ct_fp.ImagePositionPatient[0] # CT_origin_x
ct_dict["ImagePositionPatient_y"] = ct_fp.ImagePositionPatient[1] # CT_origin_y
ct_dict["ImagePositionPatient_z"] = ct_fp.ImagePositionPatient[2] # CT_origin_z, Same as SliceLocation
ct_dict["PixelSpacing_x"] = ct_fp.PixelSpacing[0] # CT_ps_x
ct_dict["PixelSpacing_y"] = ct_fp.PixelSpacing[1] # CT_ps_y
ct_dict["TableHeight"] = ct_fp.TableHeight # Table_H
ct_dict["Columns"] = ct_fp.Columns # CT_columns
ct_dict["Rows"] = ct_fp.Rows # CT_rows
ct_dict["ROIName"] = {}
ct_dict["pixel_array"] = copy.deepcopy(ct_fp.pixel_array)
ct_dict["RescaleSlope"] = ct_fp.RescaleSlope
ct_dict["RescaleIntercept"] = ct_fp.RescaleIntercept
ct_dict["rescale_pixel_array"] = ct_fp.pixel_array * ct_fp.RescaleSlope + ct_fp.RescaleIntercept
# Wish can get contourdata[x,y,z...] by ct_dict["ROIName"][roiName]["ContourData"]
CtCache["SOPInstanceUID"][ct_SOPInstanceUID] = ct_dict
CtCache["SliceLocation"][ct_SliceLocation] = ct_dict
CtCache["filepath"][ct_filepath] = ct_dict
return CtCache
pass
def get_ct_filelist_by_folder(folder):
ct_filelist = []
for file in os.listdir(folder):
# print(file)
filepath = "{}\\{}".format(folder, file)
file_exists = os.path.isfile(filepath)
if not file_exists:
continue
ct_fp = None
try:
ct_fp = pydicom.read_file(filepath)
except:
# Not dicom file
continue
if ct_fp.Modality != 'CT':
continue
# print(filepath)
ct_filelist.append(filepath)
return ct_filelist
def get_max_contours_by_filter_img(A, filter_img):
# gray_image = cv2.cvtColor(filter_img, cv2.COLOR_RGB2GRAY)
gray_image = filter_img
# findContours
# _, contours, _ = cv2.findContours(gray_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
_, contours, _ = cv2.findContours(gray_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
return contours
def get_max_contours(A, constant_value=None):
constant = None
if constant_value == None:
# Algoruthm to find constant value
data = A.ravel()
sorted_data = np.copy(data)
sorted_data.sort()
constant = sorted_data[-20] - 100
else:
constant = constant_value
        # cv2 images are (H, W, 3) uint8 arrays (note cv2 uses BGR order)
        filter_img = np.zeros((A.shape[0], A.shape[1], 3), np.uint8)
import numpy as np
from adventcode.utils import read_file
file_path = './input/day8.txt'
def parse_file(file_content):
rows = file_content.split('\n')
data = [row.split(' | ') for row in rows]
for i, entry in enumerate(data):
data[i][0] = entry[0].split()
data[i][1] = entry[1].split()
return data
data = parse_file(read_file(file_path))
l_mapping = {1: 2, 4: 4, 7: 3, 8: 7}
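# Digits with a unique number of lit segments on a seven-segment display:
# 1 uses 2 segments, 4 uses 4, 7 uses 3, and 8 uses all 7, so these four
# digits are identifiable from pattern length alone (hence part 1 below).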
# part 1
input_val = [row[0] for row in data]
output_val = [row[1] for row in data]
np_input = np.array(input_val)
np_output = np.array(output_val)
np_len = np.vectorize(len)(np_output)
print(np.isin(np_len, list(l_mapping.values())).sum())
# part 2
# _a'
# f'|_| b' inside: g'
# e'|_| c'
# d'
def length_search(np_array_in, size):
# only for searching 1 4 7 8
lengths = np.vectorize(len)(np_array_in)
result = np_array_in[lengths == size]
assert len(result) == 1
return np_array_in[lengths == size].item()
def sort_string(a_string):
sorted_characters = sorted(a_string)
return "".join(sorted_characters)
collect = []
for i in range(len(input_val)):
input_row = input_val[i]
# print(length_search(np.array(input_row), l_mapping.get(1)))
# start with decoding 1478
    # decode the four digits with unique segment counts first
    decode = {num: length_search(np.array(input_row), l_mapping.get(num))
              for num in l_mapping}
# Copyright 2019 <NAME> & <NAME>
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import numpy as np
from obspy.core import Stream
import matplotlib.pyplot as plt
def decompose(RF_r, RF_t, t1=0., t2=1., plot_f=False, plot_comps=False):
"""
Function to decompose radial and transverse receiver function
streams into back-azimuth harmonics and determine the main
orientation ``azim``, obtained by minimizing the H1' component
between ``t1`` and ``t2``.
Parameters
----------
RF_r : :class:`~obspy.core.Stream`
Stream containing the radial component receiver functions
RF_t : :class:`~obspy.core.Stream`
Stream containing the transverse component receiver functions
t1 : float
Minimum time over which to calculate ``azcorr`` (sec)
t2 : float
Maximum time over which to calculate ``azcorr`` (sec)
Returns
-------
azcorr : float
Direction (azimuth) along which the Ht_0 harmonic component
is minimized (between ``t1`` and ``t2``)
RMS : :class:`~numpy.ndarray`
Root-mean-square misfit used to determine azcorr
hr0_rot : :class:`~numpy.ndarray`
Rotated Hr_0 component
ht0_rot : :class:`~numpy.ndarray`
Rotated Ht_0 component
"""
if not isinstance(RF_r, Stream):
raise(Exception("Input radial component is not a Stream object"))
if not isinstance(RF_t, Stream):
raise(Exception("Input transverse component is not a Stream object"))
# Some integers
nbin = len(RF_r)
nn = len(RF_r[0].data)
dt = RF_r[0].stats.delta
daz = 0.1
naz = int(180./daz)
deg2rad = np.pi/180.
# Initialize work arrays
taxis = np.arange(-nn/2, nn/2)*dt
trange = np.where((taxis>t1) & (taxis<t2))[0]
print(trange)
print(taxis[trange])
nt = len(trange)
hr0_rot = np.zeros((nt, naz))
ht0_rot = np.zeros((nt, naz))
hr0 = np.zeros(nt); hr1 = np.zeros(nt); hr2 = np.zeros(nt)
hr3 = np.zeros(nt); hr4 = np.zeros(nt); meanr = np.zeros(nt)
ht0 = np.zeros(nt); ht1 = np.zeros(nt); ht2 = np.zeros(nt)
ht3 = np.zeros(nt); ht4 = np.zeros(nt); meant = np.zeros(nt)
# Loop over each depth step
for ii, it in enumerate(trange):
# Initialize work arrays
d_r = np.zeros(nbin)
d_t = np.zeros(nbin)
G = np.zeros((nbin, 5))
# Build arrays and matrices
for itrace in range(nbin):
baz = RF_r[itrace].stats.baz
d_r[itrace] = RF_r[itrace].data[it]
d_t[itrace] = RF_t[itrace].data[it]
G[itrace, 0] = 1.0
G[itrace, 1] = np.cos(deg2rad*baz)
G[itrace, 2] = np.sin(deg2rad*baz)
G[itrace, 3] = np.cos(2.*deg2rad*baz)
G[itrace, 4] = np.sin(2.*deg2rad*baz)
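            # Each row of G encodes the back-azimuth harmonic expansion
            #   d(baz) = A0 + A1 cos(baz) + A2 sin(baz)
            #               + A3 cos(2 baz) + A4 sin(2 baz)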
# Solve using damped least squares
lam=1.e-25
m_r = np.linalg.solve(np.dot(G.T, G)+lam*np.identity(G.shape[1]),
np.dot(G.T, d_r))
m_t = np.linalg.solve(np.dot(G.T, G)+lam*np.identity(G.shape[1]),
np.dot(G.T, d_t))
meanr[ii] = np.mean(d_r)
hr0[ii] = m_r[0]
hr1[ii] = m_r[1]
hr2[ii] = m_r[2]
hr3[ii] = m_r[3]
hr4[ii] = m_r[4]
meant[ii] = np.mean(d_t)
ht0[ii] = m_t[0]
ht1[ii] = m_t[1]
ht2[ii] = m_t[2]
ht3[ii] = m_t[3]
ht4[ii] = m_t[4]
for iaz in range(naz):
phi = iaz*daz*deg2rad
hr0_rot[ii, iaz] = np.cos(phi)*m_r[0] + np.sin(phi)*m_t[0]
ht0_rot[ii, iaz] = -np.sin(phi)*m_r[0] + np.cos(phi)*m_t[0]
# Minimize misfit of rotated transverse component over specific
# time range to find azim
RMS = np.zeros(naz)
for iaz in range(naz):
RMS[iaz] = np.sqrt(np.mean(np.square(ht0_rot[:, iaz])))
# RMS[iaz] = np.sqrt(np.mean(np.square(ht0_rot[indmin:indmax, iaz])))
# Azimuth of H1
indaz = np.argmin(RMS)
azcorr = indaz*daz
# Resolve ambiguity based on radial component
    if np.mean(hr0_rot[:, indaz]) < 0.:
        azcorr += 180.
    return azcorr, RMS, hr0_rot, ht0_rot
# -*- coding: utf-8 -*-
'''
Read in the data of a single episode and concatenate the old and new datasets.
Created on 2021-02-13 22:51:45
@author: hyhuang
Python 3.6.8
'''
import numpy as np
import os
import sys
dir = './data/'
action_list = os.listdir(dir+'action')
reward_list = os.listdir(dir+'reward')
state_list = os.listdir(dir+'state')
# print(action_list,'\n',reward_list,'\n',state_list)
def concat_all():
'''
    Concatenate all the single-episode files into one whole file.
'''
aa,rr,ss = [],[],[]
for i in range(len(action_list)):
if len(aa) == 0:
aa = np.load(dir+'action/'+action_list[0])
rr = np.load(dir+'reward/'+reward_list[0])
ss = np.load(dir+'state/'+state_list[0])
else:
a_tmp = np.load(dir+'action/'+action_list[i])
r_tmp = np.load(dir+'reward/'+reward_list[i])
s_tmp = np.load(dir+'state/'+state_list[i])
aa = np.concatenate((aa,a_tmp))
rr = np.concatenate((rr,r_tmp))
ss = np.concatenate((ss,s_tmp))
print(aa.shape,rr.shape,ss.shape)
return aa,rr,ss
def concat_one(aa,rr,ss):
'''
    Append every single-episode file to the given aa/rr/ss arrays.
'''
for i in range(len(action_list)):
if len(aa) == 0:
aa = np.load(dir+'action/'+action_list[0])
rr = np.load(dir+'reward/'+reward_list[0])
ss = np.load(dir+'state/'+state_list[0])
else:
a_tmp = np.load(dir+'action/'+action_list[i])
r_tmp = np.load(dir+'reward/'+reward_list[i])
s_tmp = np.load(dir+'state/'+state_list[i])
aa = np.concatenate((aa,a_tmp))
rr = np.concatenate((rr,r_tmp))
ss = np.concatenate((ss,s_tmp))
print(aa.shape,rr.shape,ss.shape)
return aa,rr,ss
def save_file():
np.save(dir+'aa.npy',aa)
np.save(dir+'ss.npy',ss)
np.save(dir+'rr.npy',rr)
sss = np.load('./data/sss.npy')
aaa = np.load('./data/aaa.npy')
rrr = np.load('./data/rrr.npy')
"""
Ranking
=======
Metrics to use for ranking models.
"""
import numpy as np
import numpy.ma as ma
from typing import Tuple
from pytypes import typechecked
@typechecked
def check_arrays(y_true: np.ndarray, y_prob: np.ndarray) -> None:
# Make sure that inputs this conforms to our expectations
assert isinstance(y_true, np.ndarray), AssertionError(
'Expect y_true to be a {expected}. Got {actual}'
.format(expected=np.ndarray, actual=type(y_true))
)
assert isinstance(y_prob, np.ndarray), AssertionError(
'Expect y_prob to be a {expected}. Got {actual}'
.format(expected=np.ndarray, actual=type(y_prob))
)
assert y_true.shape == y_prob.shape, AssertionError(
'Shapes must match. Got y_true={true_shape}, y_prob={prob_shape}'
.format(true_shape=y_true.shape, prob_shape=y_prob.shape)
)
assert len(y_true.shape) == 2, AssertionError(
'Shapes should be of rank 2. Got {rank}'
.format(rank=len(y_true.shape))
)
uniques = np.unique(y_true)
assert len(uniques) <= 2, AssertionError(
'Expected labels: [0, 1]. Got: {uniques}'
.format(uniques=uniques)
)
@typechecked
def check_k(n_items: int, k: int) -> None:
# Make sure that inputs conform to our expectations
assert isinstance(k, int), AssertionError(
'Expect k to be a {expected}. Got {actual}'
.format(expected=int, actual=type(k))
)
assert 0 <= k <= n_items, AssertionError(
'Expect 0 <= k <= {n_items}. Got {k}'
.format(n_items=n_items, k=k)
)
@typechecked
def recall_at_k(y_true: np.ndarray, y_prob: np.ndarray, k: int) -> float:
"""
Calculates recall at k for binary classification ranking problems. Recall
at k measures the proportion of total relevant items that are found in the
top k (in ranked order by y_prob). If k=5, there are 6 total relevant documents,
and 3 of the top 5 items are relevant, the recall at k will be 0.5.
Samples where y_true is 0 for all labels are filtered out because there will be
0 true positives and false negatives.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Number of items to evaluate for relevancy, in descending
sorted order by y_prob
Returns:
recall (float): The recall at k
Example:
>>> y_true = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
>>> y_prob = np.array([
[0.4, 0.6, 0.3],
[0.1, 0.2, 0.9],
[0.9, 0.6, 0.3],
])
>>> recall_at_k(y_true, y_prob, 2)
0.6666666666666666
In the example above, each of the samples has 1 total relevant document.
For the first sample, there are 0 relevant documents in the top k for k=2,
because 0.3 is the 3rd value for y_prob in descending order. For the second
sample, there is 1 relevant document in the top k, because 0.2 is the 2nd
value for y_prob in descending order. For the third sample, there is 1
relevant document in the top k, because 0.9 is the 1st value for y_prob in
descending order. Averaging the values for all of these samples (0, 1, 1)
gives a value for recall at k of 2/3.
"""
check_arrays(y_true, y_prob)
check_k(y_true.shape[1], k)
# Filter out rows of all zeros
mask = y_true.sum(axis=1).astype(bool)
y_prob = y_prob[mask]
y_true = y_true[mask]
# Extract shape components
n_samples, n_items = y_true.shape
# List of locations indexing
y_prob_index_order = np.argsort(-y_prob)
rows = np.reshape(np.arange(n_samples), (-1, 1))
ranking = y_true[rows, y_prob_index_order]
# Calculate number true positives for numerator and number of relevant documents for denominator
num_tp = np.sum(ranking[:, :k], axis=1)
num_relevant = np.sum(ranking, axis=1)
# Calculate recall at k
recall = np.mean(num_tp / num_relevant)
return recall
@typechecked
def precision_at_k(y_true: np.ndarray, y_prob: np.ndarray, k: int) -> float:
"""
Calculates precision at k for binary classification ranking problems.
Precision at k measures the proportion of items in the top k (in ranked
order by y_prob) that are relevant (as defined by y_true). If k=5, and
3 of the top 5 items are relevant, the precision at k will be 0.6.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Number of items to evaluate for relevancy, in descending
sorted order by y_prob
Returns:
precision_k (float): The precision at k
Example:
>>> y_true = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
>>> y_prob = np.array([
[0.4, 0.6, 0.3],
[0.1, 0.2, 0.9],
[0.9, 0.6, 0.3],
])
>>> precision_at_k(y_true, y_prob, 2)
0.3333333333333333
For the first sample, there are 0 relevant documents in the top k for k=2,
because 0.3 is the 3rd value for y_prob in descending order. For the second
sample, there is 1 relevant document in the top k, because 0.2 is the 2nd
value for y_prob in descending order. For the third sample, there is 1
relevant document in the top k, because 0.9 is the 1st value for y_prob in
descending order. Because k=2, the values for precision of k for each sample
are 0, 1/2, and 1/2 respectively. Averaging these gives a value for precision
at k of 1/3.
"""
check_arrays(y_true, y_prob)
check_k(y_true.shape[1], k)
# Extract shape components
n_samples, n_items = y_true.shape
# List of locations indexing
y_prob_index_order = np.argsort(-y_prob)
rows = np.reshape(np.arange(n_samples), (-1, 1))
ranking = y_true[rows, y_prob_index_order]
# Calculate number of true positives for numerator
num_tp = np.sum(ranking[:, :k], axis=1)
# Calculate precision at k
precision = np.mean(num_tp / k)
return precision
@typechecked
def mean_reciprocal_rank(y_true: np.ndarray, y_prob: np.ndarray) -> np.ma.MaskedArray:
"""
Gets a positional score about how well you did at rank 1, rank 2,
etc. The resulting vector is of size (n_items,) but element 0 corresponds to
rank 1 not label 0.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
Returns:
mrr (~np.ma.array): The positional ranking score. This will be masked
for ranks where there were no relevant values. size=(n_items,)
"""
check_arrays(y_true, y_prob)
# Extract shape components
n_samples, n_items = y_true.shape
# Determine the ranking order
rank_true = np.flip(np.argsort(y_true, axis=1), axis=1)
rank_prob = np.flip(np.argsort(y_prob, axis=1), axis=1)
# Compute reciprocal ranks
reciprocal = 1.0 / (np.argsort(rank_prob, axis=1) + 1)
# Now order the reciprocal ranks by the true order
rows = np.reshape(np.arange(n_samples), (-1, 1))
cols = rank_true
ordered = reciprocal[rows, cols]
# Create a masked array of true labels only
    mrr = np.ma.array(ordered, mask=np.isclose(y_true[rows, cols], 0))
    # Mean reciprocal rank per rank position; a position stays masked only
    # if no sample has a relevant item there
    return mrr.mean(axis=0)
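# Worked example (sketch): for y_true = [[0, 0, 1]] and y_prob =
# [[0.4, 0.6, 0.3]], the one relevant item is ranked 3rd by y_prob, so the
# returned array is [1/3, --, --]: rank position 1 scores 1/3 and the other
# positions are masked (no relevant items there).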
def test_add():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_iadd():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_subtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_isubtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_divide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_idivide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_multiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_multiply_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_multiply_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_imultiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_imultiply_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_imultiply_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_gt():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
    assert np.array_equal(result, reference)
import unittest
import unittest.mock
import numpy as np
from ConfigSpace import CategoricalHyperparameter, \
UniformFloatHyperparameter, UniformIntegerHyperparameter, \
OrdinalHyperparameter, EqualsCondition
from smac.epm.rf_with_instances import RandomForestWithInstances
from smac.epm.util_funcs import get_types
import smac
import smac.configspace
class TestRFWithInstances(unittest.TestCase):
def _get_cs(self, n_dimensions):
configspace = smac.configspace.ConfigurationSpace()
for i in range(n_dimensions):
configspace.add_hyperparameter(UniformFloatHyperparameter('x%d' % i, 0, 1))
return configspace
def test_predict_wrong_X_dimensions(self):
rs = np.random.RandomState(1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((10,), dtype=np.uint),
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
X = rs.rand(10)
self.assertRaisesRegex(ValueError, "Expected 2d array, got 1d array!",
model.predict, X)
X = rs.rand(10, 10, 10)
self.assertRaisesRegex(ValueError, "Expected 2d array, got 3d array!",
model.predict, X)
X = rs.rand(10, 5)
self.assertRaisesRegex(ValueError, "Rows in X should have 10 entries "
"but have 5!",
model.predict, X)
def test_predict(self):
rs = np.random.RandomState(1)
X = rs.rand(20, 10)
Y = rs.rand(10, 1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((10,), dtype=np.uint),
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
model.train(X[:10], Y[:10])
m_hat, v_hat = model.predict(X[10:])
self.assertEqual(m_hat.shape, (10, 1))
self.assertEqual(v_hat.shape, (10, 1))
def test_train_with_pca(self):
        rs = np.random.RandomState(1)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
### Pre-process training data ###
df_train = pd.read_csv("train.csv")
df_train.head()
feats = df_train.drop("revenue", axis=1)
X = feats.values #features
y = df_train["revenue"].values #target
# Build a forest and compute the feature importances
forest = ExtraTreesRegressor(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from scipy.signal import remez, freqz
import matplotlib.pyplot as plt
def bellanger_estimate(deltap, deltas, width, fs):
"""
Estimate the number of taps required for the given filter specifications.
"""
n = (-2/3)*np.log10(10*deltap*deltas)*fs/width
n = int(np.ceil(n))
return n
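# Worked example with this script's specs (deltap=0.005, deltas=0.002,
# width=30 Hz, fs=1000 Hz):
#   n = (-2/3) * log10(10 * 0.005 * 0.002) * 1000 / 30
#     = (-2/3) * (-4) * 33.33...  ~= 88.9  ->  ceil to 89 taps.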
def remez_lowpass(deltap, deltas, cutoff, width, fs):
""" estimate coeffient of each taps"""
numtaps = bellanger_estimate(deltap, deltas, width, fs)
numtaps |= 1 # Bitwise OR with 1 to ensure an odd number of taps.
trans_lo = cutoff - 0.5*width
trans_hi = cutoff + 0.5*width
taps = remez(numtaps,
bands=[0, trans_lo, trans_hi, 0.5*fs],
desired=[1, 0],
weight=[1/deltap, 1/deltas],
fs=fs)
return taps
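# With the parameters below (cutoff=180 Hz, width=30 Hz, fs=1000 Hz), remez
# sees bands=[0, 165, 195, 500]: a passband up to 165 Hz and a stopband
# from 195 Hz to Nyquist.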
#---------------------------------------
# User inputs...
# Frequency values in Hz
fs = 1000.0
cutoff = 180.0
width = 30.0
# Desired pass band ripple and stop band attenuation
deltap = 0.005
deltas = 0.002
print(u"Pass band: 1 ± %g ([%.3g, %.3g] dB)" %
(deltap, 20*np.log10(1 - deltap), 20*np.log10(1 + deltap)))
print("Stop band rejection: %g (%.3g dB)" % (deltas, -20*np.log10(deltas),))
#---------------------------------------
# Design the filter...
taps = remez_lowpass(deltap, deltas, cutoff, width, fs)
#----------------------------------------
# Plot the frequency response...
upper_ripple_db = 20*np.log10(1 + deltap)
lower_ripple_db = 20*np.log10(1 - deltap)
stop_db = -20*np.log10(deltas)
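# e.g. deltas = 0.002 gives stop_db = -20*log10(0.002) ~= 54.0 dB.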
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from absl import logging
import gin
import math
import numpy as np
import weakref
import threading
try:
import carla
except ImportError:
carla = None
if carla is not None:
try:
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.navigation.local_planner import RoadOption
except ImportError:
logging.fatal("Cannot import carla agents package. Please add "
"$CARLA_ROOT/PythonAPI/carla to your PYTHONPATH")
carla = None
import alf
MINIMUM_RENDER_WIDTH = 640
MINIMUM_RENDER_HEIGHT = 240
class SensorBase(abc.ABC):
"""Base class for sersors."""
def __init__(self, parent_actor):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor
"""
self._sensor = None
self._parent = parent_actor
def destroy(self):
"""Return the commands for destroying this sensor.
Use ``carla.Client.apply_batch_sync()`` to actually destroy the sensor.
Returns:
list[carla.command]: the commands used to destroy the sensor.
"""
if self._sensor is not None:
self._sensor.stop()
return [carla.command.DestroyActor(self._sensor)]
else:
return []
@abc.abstractmethod
def get_current_observation(self, current_frame):
"""Get the current observation.
Args:
current_frame (int): current frame no. For some sensors, they may
not receive any data in the most recent tick. ``current_frame``
will be compared against the frame no. of the last received data
to make sure that the data is correctly interpretted. Note that
if the sensor receives event in the most recent frame,
event.frame should be equal to current_frame - 1.
Returns:
nested np.ndarray: sensor data received in the last tick.
"""
@abc.abstractmethod
def observation_spec(self):
"""Get the observation spec of this sensor.
Returns:
nested TensorSpec:
"""
@abc.abstractmethod
def observation_desc(self):
"""Get the description about the observation of this sensor.
Returns:
nested str: each str corresponds to one TensorSpec from
``observatin_spec()``.
"""
@gin.configurable
class CollisionSensor(SensorBase):
"""CollisionSensor for getting collision signal.
It gets the impulses from the collisions during the last tick.
TODO: include event.other_actor in the sensor result.
"""
def __init__(self, parent_actor, max_num_collisions=4):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor
max_num_collisions (int): maximal number of collisions to be included
"""
super().__init__(parent_actor)
self._max_num_collisions = max_num_collisions
world = self._parent.get_world()
bp = world.get_blueprint_library().find('sensor.other.collision')
self._sensor = world.spawn_actor(
bp, carla.Transform(), attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid circular
# reference.
weak_self = weakref.ref(self)
self._sensor.listen(lambda event: CollisionSensor._on_collision(
weak_self, event))
self._frame = 0
self._prev_cached_frame = -1
self._cached_impulse = None
self._empty_impulse = np.zeros([max_num_collisions, 3],
dtype=np.float32)
self._collisions = []
self._lock = threading.Lock()
@staticmethod
def _on_collision(weak_self, event):
self = weak_self()
if not self:
return
impulse = event.normal_impulse
self._frame = event.frame
with self._lock:
self._collisions.append([impulse.x, impulse.y, impulse.z])
def observation_spec(self):
return alf.TensorSpec([self._max_num_collisions, 3])
def observation_desc(self):
return (
"Impulses from collision during the last tick. Each impulse is "
"a 3-D vector. At most %d collisions are used. The result is padded "
"with zeros if there are less than %d collisions" %
(self._max_num_collisions, self._max_num_collisions))
def get_current_observation(self, current_frame):
"""Get the current observation.
Args:
current_frame (int): current frame no. CollisionSensor may not
not receive any data in the most recent tick. ``current_frame``
will be compared against the frame no. of the last received data
to make sure that the data is correctly interpretted.
Returns:
np.ndarray: Impulses from collision during the last tick. Each
impulse is a 3-D vector. At most ``max_num_collisions``
collisions are used. The result is padded with zeros if there
are less than ``max_num_collisions`` collisions
"""
if current_frame == self._prev_cached_frame:
return self._cached_impulse
assert current_frame > self._prev_cached_frame, (
"Cannot get frames %d older than previously cached one %d!" %
(current_frame, self._prev_cached_frame))
with self._lock:
impulses = np.array(self._collisions, dtype=np.float32)
self._collisions = []
n = impulses.shape[0]
if n == 0:
impulses = self._empty_impulse
elif n < self._max_num_collisions:
impulses = np.concatenate([
np.zeros([self._max_num_collisions - n, 3], dtype=np.float32),
impulses
],
axis=0)
elif n > self._max_num_collisions:
impulses = impulses[-self._max_num_collisions:]
self._cached_impulse = impulses
self._prev_cached_frame = current_frame
return impulses
# ==============================================================================
# -- LaneInvasionSensor --------------------------------------------------------
# ==============================================================================
class LaneInvasionSensor(SensorBase):
"""LaneInvasionSensor for detecting lane invasion.
Lane invasion cannot be directly observed by raw sensors used by real cars.
So main purpose of this is to provide training signal (e.g. reward).
TODO: not completed.
"""
def __init__(self, parent_actor):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor
"""
super().__init__(parent_actor)
world = self._parent.get_world()
bp = world.get_blueprint_library().find('sensor.other.lane_invasion')
self._sensor = world.spawn_actor(
bp, carla.Transform(), attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid circular
# reference.
weak_self = weakref.ref(self)
self._sensor.listen(lambda event: LaneInvasionSensor._on_invasion(
weak_self, event))
@staticmethod
def _on_invasion(weak_self, event):
self = weak_self()
if not self:
return
def get_current_observation(self, current_frame):
raise NotImplementedError()
def observation_spec(self):
raise NotImplementedError()
def observation_desc(self):
raise NotImplementedError()
# ==============================================================================
# -- GnssSensor ----------------------------------------------------------------
# ==============================================================================
class GnssSensor(SensorBase):
"""GnssSensor for sensing GPS location."""
def __init__(self, parent_actor):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor
"""
super().__init__(parent_actor)
world = self._parent.get_world()
bp = world.get_blueprint_library().find('sensor.other.gnss')
self._sensor = world.spawn_actor(
bp,
carla.Transform(carla.Location(x=1.0, z=2.8)),
attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid circular
# reference.
weak_self = weakref.ref(self)
self._sensor.listen(lambda event: GnssSensor._on_gnss_event(
weak_self, event))
self._gps_location = np.zeros([3], dtype=np.float32)
self._frame = 0
@staticmethod
def _on_gnss_event(weak_self, event):
self = weak_self()
if not self:
return
self._gps_location = np.array(
[event.latitude, event.longitude, event.altitude],
dtype=np.float32)
self._frame = event.frame
def observation_spec(self):
return alf.TensorSpec([3])
def observation_desc(self):
return "A vector of [latitude (degrees), longitude (degrees), altitude (meters to be confirmed)]"
def get_current_observation(self, current_frame):
"""
Args:
current_frame (int): not used
Returns:
np.ndarray: A vector of [latitude (degrees), longitude (degrees),
altitude (meters to be confirmed)]
"""
return self._gps_location
# ==============================================================================
# -- IMUSensor -----------------------------------------------------------------
# ==============================================================================
class IMUSensor(SensorBase):
"""IMUSensor for sensing accelaration and rotation."""
def __init__(self, parent_actor):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor
"""
super().__init__(parent_actor)
self._compass = 0.0
world = self._parent.get_world()
bp = world.get_blueprint_library().find('sensor.other.imu')
self._sensor = world.spawn_actor(
bp, carla.Transform(), attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid circular
# reference.
weak_self = weakref.ref(self)
self._sensor.listen(lambda sensor_data: IMUSensor._IMU_callback(
weak_self, sensor_data))
self._imu_reading = np.zeros([7], dtype=np.float32)
self._frame = 0
@staticmethod
def _IMU_callback(weak_self, sensor_data):
self = weak_self()
if not self:
return
if not math.isnan(sensor_data.compass):
self._compass = sensor_data.compass
else:
logging.warning(
"Got nan for compass. Use the previous compass reading.")
imu_reading = np.array([
sensor_data.accelerometer.x, sensor_data.accelerometer.y,
sensor_data.accelerometer.z, sensor_data.gyroscope.x,
sensor_data.gyroscope.y, sensor_data.gyroscope.z, self._compass
],
dtype=np.float32)
self._imu_reading = np.clip(imu_reading, -99.9, 99.9)
self._frame = sensor_data.frame
def observation_spec(self):
return alf.TensorSpec([7])
def observation_desc(self):
return (
"7-D vector of [accelaration, gyroscope, compass], where "
"accelaration is a 3-D vector in m/s^2, gyroscope is angular "
"velocity in rad/s^2, and compass is orientation with regard to the "
"North ((0.0, 1.0, 0.0) in Unreal Engine) in radians.")
def get_current_observation(self, current_frame):
return self._imu_reading
# ==============================================================================
# -- RadarSensor ---------------------------------------------------------------
# ==============================================================================
@gin.configurable
class RadarSensor(SensorBase):
"""RadarSensor for detecting obstacles."""
def __init__(self,
parent_actor,
xyz=(2.8, 0., 1.0),
pyr=(5., 0., 0.),
max_num_detections=200):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor.
xyz (tuple[float]): the attachment positition (x, y, z) relative to
the parent_actor.
pyr (tuple[float]): the attachment rotation (pitch, yaw, roll) in
degrees.
max_num_detections (int): maximal number of detection points.
"""
super().__init__(parent_actor)
self._velocity_range = 7.5 # m/s
self._max_num_detections = max_num_detections
world = self._parent.get_world()
bp = world.get_blueprint_library().find('sensor.other.radar')
bp.set_attribute('horizontal_fov', str(35))
bp.set_attribute('vertical_fov', str(20))
self._sensor = world.spawn_actor(
bp,
carla.Transform(carla.Location(*xyz), carla.Rotation(*pyr)),
attach_to=self._parent)
# We need a weak reference to self to avoid circular reference.
weak_self = weakref.ref(self)
self._sensor.listen(lambda radar_data: RadarSensor._Radar_callback(
weak_self, radar_data))
self._empty_points = np.zeros([max_num_detections, 4],
dtype=np.float32)
self._detected_points = self._empty_points
self._frame = 0
self._prev_cached_frame = -1
self._cached_points = None
self._lock = threading.Lock()
@staticmethod
def _Radar_callback(weak_self, radar_data):
self = weak_self()
if not self:
return
self._frame = radar_data.frame
points = np.frombuffer(radar_data.raw_data, dtype=np.float32)
points = np.reshape(points, (len(radar_data), 4))
n = len(radar_data)
if n < self._max_num_detections:
points = np.concatenate([
np.zeros([self._max_num_detections - n, 4], dtype=np.float32),
points
],
axis=0)
elif n > self._max_num_detections:
points = points[-self._max_num_detections:, :]
with self._lock:
self._detected_points = points
def observation_spec(self):
return alf.TensorSpec([self._max_num_detections, 4])
def observation_desc(self):
return (
"A set of detected points. Each detected point is a 4-D vector "
"of [vel, altitude, azimuth, depth], where vel is the velocity of "
"the detected object towards the sensor in m/s, altitude is the "
"altitude angle of the detection in radians, azimuth is the azimuth "
"angle of the detection in radians, and depth id the distance from "
"the sensor to the detection in meters.")
def get_current_observation(self, current_frame):
"""
Args:
current_frame (int): current frame no. RadarSensor may not receive
any data in the most recent tick. ``current_frame`` will be
compared against the frame no. of the last received data to make
sure that the data is correctly interpretted.
Returns:
np.ndarray: A set of detected points. Each detected point is a 4-D
vector of [vel, altitude, azimuth, depth], where vel is the
velocity of the detected object towards the sensor in m/s,
altitude is the altitude angle of the detection in radians,
azimuth is the azimuth angle of the detection in radians, and
depth id the distance from the sensor to the detection in meters.
"""
if current_frame == self._prev_cached_frame:
return self._cached_points
assert current_frame > self._prev_cached_frame, (
"Cannot get frames %d older than previously cached one %d!" %
(current_frame, self._prev_cached_frame))
with self._lock:
self._cached_points = self._detected_points
self._detected_points = self._empty_points
self._prev_cached_frame = current_frame
return self._cached_points
# ==============================================================================
# -- CameraSensor -------------------------------------------------------------
# ==============================================================================
@gin.configurable
class CameraSensor(SensorBase):
"""CameraSensor."""
def __init__(
self,
parent_actor,
sensor_type='sensor.camera.rgb',
xyz=(1.6, 0., 1.7),
pyr=(0., 0., 0.),
attachment_type='rigid',
fov=90.0,
fstop=1.4,
gamma=2.2,
image_size_x=640,
image_size_y=480,
iso=1200.0,
):
"""
Args:
parent_actor (carla.Actor): the parent actor of this sensor
sensor_type (str): 'sensor.camera.rgb', 'sensor.camera.depth',
'sensor.camera.semantic_segmentation'
attachment_type (str): There are two types of attachement. 'rigid':
the object follow its parent position strictly. 'spring_arm':
the object expands or retracts depending on camera situation.
xyz (tuple[float]): the attachment positition (x, y, z) relative to
the parent_actor.
pyr (tuple[float]): the attachment rotation (pitch, yaw, roll) in
degrees.
fov (str): horizontal field of view in degrees.
image_size_x (int): image width in pixels.
image_size_y (int): image height in pixels.
gamma (float): target gamma value of the camera.
iso (float): the camera sensor sensitivity.
"""
super().__init__(parent_actor)
attachment_type_map = {
'rigid': carla.AttachmentType.Rigid,
'spring_arm': carla.AttachmentType.SpringArm,
}
assert attachment_type in attachment_type_map, (
"Unknown attachment_type %s" % attachment_type)
self._attachment_type = attachment_type_map[attachment_type]
self._camera_transform = carla.Transform(
carla.Location(*xyz), carla.Rotation(*pyr))
self._sensor_type = sensor_type
sensor_map = {
'sensor.camera.rgb': (carla.ColorConverter.Raw, 3),
'sensor.camera.depth': (carla.ColorConverter.LogarithmicDepth, 1),
'sensor.camera.semantic_segmentation': (carla.ColorConverter.Raw,
1),
}
assert sensor_type in sensor_map, "Unknown sensor type %s" % sensor_type
conversion, num_channels = sensor_map[sensor_type]
self._conversion = conversion
self._observation_spec = alf.TensorSpec(
[num_channels, image_size_y, image_size_x], dtype='uint8')
world = self._parent.get_world()
bp = world.get_blueprint_library().find(sensor_type)
attributes = dict(
fov=fov,
fstop=fstop,
gamma=gamma,
image_size_x=image_size_x,
image_size_y=image_size_y,
iso=iso)
for name, val in attributes.items():
if bp.has_attribute(name):
bp.set_attribute(name, str(val))
self._sensor = self._parent.get_world().spawn_actor(
bp,
self._camera_transform,
attach_to=self._parent,
attachment_type=self._attachment_type)
# We need to pass the lambda a weak reference to self to avoid
# circular reference.
weak_self = weakref.ref(self)
self._sensor.listen(lambda image: CameraSensor._parse_image(
weak_self, image))
self._frame = 0
self._image = np.zeros([num_channels, image_size_y, image_size_x],
dtype=np.uint8)
def render(self, display):
"""Render the camera image to a pygame display.
Args:
display (pygame.Surface): the display surface to draw the image
"""
if self._image is not None:
import cv2
import pygame
height, width = self._image.shape[1:3]
image = np.transpose(self._image, (2, 1, 0))
if width < MINIMUM_RENDER_WIDTH:
height = height * MINIMUM_RENDER_WIDTH // width
image = cv2.resize(
image,
dsize=(height, MINIMUM_RENDER_WIDTH),
interpolation=cv2.INTER_NEAREST)
surface = pygame.surfarray.make_surface(image)
display.blit(surface, (0, 0))
@staticmethod
def _parse_image(weak_self, image):
self = weak_self()
if not self:
return
image.convert(self._conversion)
array = np.frombuffer(image.raw_data, dtype=np.uint8)
        array = np.reshape(array, (image.height, image.width, 4))
import numpy as np
from scipy import ndimage
import tifffile as tiff
import matplotlib.pyplot as plt
import pandas as pd
from enum import Enum
from skimage.transform import resize
# Worldview-3 - Panchromatic (3349, 3338): 400nm - 800nm
# Worldview-3 RGB (3350, 3338)
# Worldview-3 - 8 Multispectral bands (838, 835):
# Coastal: 400 - 450 nm (0, QGIS: 1, WV-3-Band-no:2) Red: 630 - 690 nm (4, QGIS: 5, WV-3-Band-no:6)
# Blue: 450 - 510 nm (1, QGIS: 2, WV-3-Band-no:3) Red Edge: 705 - 745 nm (5, QGIS: 6, WV-3-Band-no:7)
# Green: 510 - 580 nm (2, QGIS: 3, WV-3-Band-no:4) Near-IR1: 770 - 895 nm (6, QGIS: 7, WV-3-Band-no:8)
# Yellow: 585 - 625 nm (3, QGIS: 4, WV-3-Band-no:5) Near-IR2: 860 - 1040 nm (7, QGIS: 8, WV-3-Band-no:9)
# NIR - Near Infra Red: 750nm - 1400nm
# MIR - Mid Infra Red: 3000nm - 8000nm
# Worldview-3 - 8 SWIR bands (134, 133):
# SWIR-1: 1195 - 1225 nm SWIR-5: 2145 - 2185 nm
# SWIR-2: 1550 - 1590 nm SWIR-6: 2185 - 2225 nm
# SWIR-3: 1640 - 1680 nm SWIR-7: 2235 - 2285 nm
# SWIR-4: 1710 - 1750 nm SWIR-8: 2295 - 2365 nm
class WV3ms(Enum):
COASTAL = 0
BLUE = 1
GREEN = 2
YELLOW = 3
RED = 4
REDEDGE = 5
NEARIR1 = 6
NEARIR2 = 7
class WV3swir(Enum):
SWIR_1 = 0
SWIR_2 = 1
SWIR_3 = 2
SWIR_4 = 3
SWIR_5 = 4
SWIR_6 = 5
SWIR_7 = 6
SWIR_8 = 7
CCCI_THRESHOLD_U = 0.5
CCCI_THRESHOLD_L = -4
FAUX_CCCI_THRESHOLD = 0.11
# CCCI_SWIR_THRESHOLD = 1.03
CCCI_SWIR_THRESHOLD = .94
NDWI_THRESHOLD = 0.07
NDVI_THRESHOLD = 0.07
def stretch_8bit(bands, lower_percent=2, higher_percent=98, depth=3):
# contrast enhancement as per QGIS Stretch to MinMax
# note that input image range is 0 .. 1
out = np.zeros_like(bands).astype(np.float32)
for i in range(depth):
a = 0
b = 1
if depth == 1:
c = np.percentile(bands[:, :], lower_percent)
d = np.percentile(bands[:, :], higher_percent)
t = a + (bands[:, :] - c) * (b - a) / (d - c)
else:
c = np.percentile(bands[:, :, i], lower_percent)
d = np.percentile(bands[:, :, i], higher_percent)
t = a + (bands[:, :, i] - c) * (b - a) / (d - c)
t[t < a] = a
t[t > b] = b
if depth == 1:
out[:, :] = t
else:
out[:, :, i] = t
return out.astype(np.float32)
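
# Example usage (illustrative, not in the original script): stretch a random
# 3-band float image; values outside the 2nd-98th percentile window saturate
# to 0 or 1, mimicking the QGIS "Stretch to MinMax" rendering.
#
#     rgb = np.random.rand(256, 256, 3).astype(np.float32)
#     rgb_stretched = stretch_8bit(rgb, lower_percent=2, higher_percent=98, depth=3)
#     plt.imshow(rgb_stretched)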
def EVI_index(msdata):
# Enhanced Vegetation Index
NIR2 = msdata[WV3ms.NEARIR2.value, :, :].astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
CB = msdata[WV3ms.COASTAL.value, :, :].astype(np.float32)
# EVI = 2.5 * (NIR2 - R)/(NIR2 + 6.0*R - 7.5*CB + 1.0)
a = 2.5 * (NIR2 - R)
b = NIR2 + 6.0*R - 7.5*CB + 1.0
with np.errstate(divide='ignore', invalid='ignore'):
EVI = np.true_divide(a, b)
EVI[EVI == np.inf] = 0
EVI = np.nan_to_num(EVI)
return EVI
def SAVI_index(msdata):
# Soil Adjusted Vegetation Index
NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
# The value of L varies by the amount or cover of green vegetation: in very high vegetation regions,
# L=0; and in areas with no green vegetation, L=1. Generally, an L=0.5 works well in most situations
# and is the default value used. When L=0, then SAVI = NDVI.
L = 0.5
# SAVI = (1 + L) * (NIR1 - R)/(NIR1 + R + L)
a = (1 + L) * (NIR1 - R)
b = NIR1 + R + L
with np.errstate(divide='ignore', invalid='ignore'):
SAVI = np.true_divide(a, b)
SAVI[SAVI == np.inf] = 0
SAVI = np.nan_to_num(SAVI)
return SAVI
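
# The divide-guard pattern above recurs in every index function of this module.
# A hypothetical refactor (not part of the original code) that factors it out:
def _safe_divide(a, b):
    # elementwise a / b with inf and NaN results zeroed, matching the
    # inline pattern used by EVI_index, SAVI_index and the CCCI variants
    with np.errstate(divide='ignore', invalid='ignore'):
        out = np.true_divide(a, b)
    out[out == np.inf] = 0
    return np.nan_to_num(out)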
def faux_CCCI_index(msdata, rgbdata):
RE = resize(msdata[WV3ms.REDEDGE.value, :, :], (rgbdata.shape[0], rgbdata.shape[1]),
mode='constant', preserve_range=False)
NIR2 = resize(msdata[WV3ms.NEARIR2.value, :, :], (rgbdata.shape[0], rgbdata.shape[1]),
mode='constant', preserve_range=False)
R = rgbdata[:, :, 0]
# resize: note that with the default preserve_range=False the input image is
# converted according to the conventions of img_as_float (values in [0, 1])
# from the original 11 bits range [0, 2047]. preserve_range=True should be used.
# faux_CCCI_index only works preserve_range=False - reason unknown
# Canopy Chlorophyll Content Index
# CCCI = ((NIR2 - RE) / (NIR2 + RE)) / ((NIR2 - R) / (NIR2 + R))
a = NIR2 - RE
b = NIR2 + RE
# c = NIR2 - R
# d = NIR2 + R
c = R * (-1)
d = R
with np.errstate(divide='ignore', invalid='ignore'):
e = np.true_divide(a, b)
e[e == np.inf] = 0
e = np.nan_to_num(e)
f = np.true_divide(c, d)
f[f == np.inf] = 0
f = np.nan_to_num(f)
CCCI = np.true_divide(e, f)
CCCI[CCCI == np.inf] = 0
CCCI = np.nan_to_num(CCCI)
return CCCI
def CCCI_NIR2_index(msdata):
# Canopy Chlorophyll Content Index
# uses NIR2 rather than SWIR_1
RE = msdata[WV3ms.REDEDGE.value, :, :].astype(np.float32)
NIR2 = msdata[WV3ms.NEARIR2.value, :, :].astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
    # CCCI = ((NIR2 - RE) / (NIR2 + RE)) / ((NIR2 - R) / (NIR2 + R))
a = NIR2 - RE
b = NIR2 + RE
c = NIR2 - R
d = NIR2 + R
with np.errstate(divide='ignore', invalid='ignore'):
e = np.true_divide(a, b)
e[e == np.inf] = 0
e = np.nan_to_num(e)
f = np.true_divide(c, d)
f[f == np.inf] = 0
f = np.nan_to_num(f)
CCCI = np.true_divide(e, f)
CCCI[CCCI == np.inf] = 0
CCCI = np.nan_to_num(CCCI)
return CCCI
def CCCI_SWIR_index(msdata, swirdata):
# Canopy Chlorophyll Content Index
# uses SWIR_1
RE = msdata[WV3ms.REDEDGE.value, :, :].astype(np.float32)
SWIR1 = resize(swirdata[WV3swir.SWIR_1.value, :, :], (msdata.shape[1], msdata.shape[2]),
mode='constant', preserve_range=True).astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
    # CCCI = ((SWIR1 - RE) / (SWIR1 + RE)) / ((SWIR1 - R) / (SWIR1 + R))
a = SWIR1 - RE
b = SWIR1 + RE
c = SWIR1 - R
d = SWIR1 + R
with np.errstate(divide='ignore', invalid='ignore'):
e = np.true_divide(a, b)
e[e == np.inf] = 0
e = np.nan_to_num(e)
f = np.true_divide(c, d)
f[f == np.inf] = 0
f = np.nan_to_num(f)
CCCI = np.true_divide(e, f)
CCCI[CCCI == np.inf] = 0
        CCCI = np.nan_to_num(CCCI)
    return CCCI
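
# NDWI_THRESHOLD and NDVI_THRESHOLD are defined above, but the matching index
# functions fall outside this excerpt. Hedged sketches following the standard
# definitions (NDVI, Rouse et al.; NDWI, McFeeters) and this module's
# conventions; the names and band choices here are assumptions:
def NDVI_index(msdata):
    # Normalized Difference Vegetation Index: (NIR1 - R) / (NIR1 + R)
    NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32)
    R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
    with np.errstate(divide='ignore', invalid='ignore'):
        NDVI = np.true_divide(NIR1 - R, NIR1 + R)
    NDVI[NDVI == np.inf] = 0
    return np.nan_to_num(NDVI)

def NDWI_index(msdata):
    # Normalized Difference Water Index: (G - NIR1) / (G + NIR1)
    G = msdata[WV3ms.GREEN.value, :, :].astype(np.float32)
    NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32)
    with np.errstate(divide='ignore', invalid='ignore'):
        NDWI = np.true_divide(G - NIR1, G + NIR1)
    NDWI[NDWI == np.inf] = 0
    return np.nan_to_num(NDWI)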
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the JacobianTape"""
import pytest
import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape, QuantumTape
class TestConstruction:
"""Test for queuing and construction"""
@pytest.fixture
def make_tape(self):
ops = []
obs = []
with JacobianTape() as tape:
ops += [qml.RX(0.432, wires=0)]
ops += [qml.Rot(0.543, 0, 0.23, wires=0)]
ops += [qml.CNOT(wires=[0, "a"])]
ops += [qml.RX(0.133, wires=4)]
obs += [qml.PauliX(wires="a")]
qml.expval(obs[0])
obs += [qml.probs(wires=[0, "a"])]
return tape, ops, obs
def test_parameter_info(self, make_tape):
"""Test that parameter information is correctly extracted"""
tape, ops, obs = make_tape
tape._update_gradient_info()
assert tape._trainable_params == set(range(5))
assert tape._par_info == {
0: {"op": ops[0], "p_idx": 0, "grad_method": "F"},
1: {"op": ops[1], "p_idx": 0, "grad_method": "F"},
2: {"op": ops[1], "p_idx": 1, "grad_method": "F"},
3: {"op": ops[1], "p_idx": 2, "grad_method": "F"},
4: {"op": ops[3], "p_idx": 0, "grad_method": "0"},
}
class TestGradMethod:
"""Tests for parameter gradient methods"""
def test_non_differentiable(self):
"""Test that a non-differentiable parameter is
correctly marked"""
psi = np.array([1, 0, 1, 0]) / np.sqrt(2)
with JacobianTape() as tape:
qml.QubitStateVector(psi, wires=[0, 1])
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0, 1])
assert tape._grad_method(0) is None
assert tape._grad_method(1) == "F"
assert tape._grad_method(2) == "F"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] is None
assert tape._par_info[1]["grad_method"] == "F"
assert tape._par_info[2]["grad_method"] == "F"
def test_independent(self):
"""Test that an independent variable is properly marked
as having a zero gradient"""
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.expval(qml.PauliY(0))
assert tape._grad_method(0) == "F"
assert tape._grad_method(1) == "0"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] == "F"
assert tape._par_info[1]["grad_method"] == "0"
# in non-graph mode, it is impossible to determine
# if a parameter is independent or not
tape._graph = None
assert tape._grad_method(1, use_graph=False) == "F"
class TestJacobian:
"""Unit tests for the jacobian method"""
def test_unknown_grad_method_error(self):
"""Test error raised if gradient method is unknown"""
tape = JacobianTape()
with pytest.raises(ValueError, match="Unknown gradient method"):
tape.jacobian(None, method="unknown method")
def test_non_differentiable_error(self):
"""Test error raised if attempting to differentiate with
respect to a non-differentiable argument"""
psi = np.array([1, 0, 1, 0]) / np.sqrt(2)
with JacobianTape() as tape:
qml.QubitStateVector(psi, wires=[0, 1])
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0, 1])
# by default all parameters are assumed to be trainable
with pytest.raises(
ValueError, match=r"Cannot differentiate with respect to parameter\(s\) {0}"
):
tape.jacobian(None)
# setting trainable parameters avoids this
tape.trainable_params = {1, 2}
dev = qml.device("default.qubit", wires=2)
res = tape.jacobian(dev)
assert res.shape == (4, 2)
def test_analytic_method_with_unsupported_params(self):
"""Test that an exception is raised if method="A" but a parameter
only support finite differences"""
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[0])
qml.expval(qml.PauliY(0))
dev = qml.device("default.qubit", wires=1)
with pytest.raises(ValueError, match=r"analytic gradient method cannot be used"):
tape.jacobian(dev, method="analytic")
def test_analytic_method(self, mocker):
"""Test that calling the Jacobian with method=analytic correctly
calls the analytic_pd method"""
mock = mocker.patch("pennylane.tape.JacobianTape._grad_method")
mock.return_value = "A"
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[0])
qml.expval(qml.PauliY(0))
dev = qml.device("default.qubit", wires=1)
tape.analytic_pd = mocker.Mock()
tape.analytic_pd.return_value = [[QuantumTape()], lambda res: np.array([1.])]
tape.jacobian(dev, method="analytic")
assert len(tape.analytic_pd.call_args_list) == 2
def test_device_method(self, mocker):
"""Test that calling the Jacobian with method=device correctly
calls the device_pd method"""
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[0])
qml.expval(qml.PauliY(0))
dev = qml.device("default.qubit", wires=1)
dev.jacobian = mocker.Mock()
tape.device_pd(dev)
dev.jacobian.assert_called_once()
dev.jacobian = mocker.Mock()
tape.jacobian(dev, method="device")
dev.jacobian.assert_called_once()
def test_no_output_execute(self):
"""Test that tapes with no measurement process return
an empty list."""
dev = qml.device("default.qubit", wires=2)
params = [0.1, 0.2]
with JacobianTape() as tape:
qml.RX(params[0], wires=[0])
qml.RY(params[1], wires=[1])
res = tape.jacobian(dev)
assert res.size == 0
def test_incorrect_inferred_output_dim(self):
"""Test that a quantum tape with an incorrect inferred output dimension
corrects itself when computing the Jacobian."""
dev = qml.device("default.qubit", wires=3)
params = [1.0, 1.0, 1.0]
with JacobianTape() as tape:
qml.RX(params[0], wires=[0])
qml.RY(params[1], wires=[1])
qml.RZ(params[2], wires=[2])
qml.CNOT(wires=[0, 1])
qml.probs(wires=0)
qml.probs(wires=[1])
# inferred output dim should be correct
assert tape.output_dim == sum([2, 2])
# modify the output dim
tape._output_dim = 2
res = tape.jacobian(dev, order=2, method="numeric")
# output dim should be correct
assert tape.output_dim == sum([2, 2])
assert res.shape == (4, 3)
def test_incorrect_ragged_output_dim(self, mocker):
"""Test that a quantum tape with an incorrect inferred *ragged* output dimension
corrects itself after evaluation."""
dev = qml.device("default.qubit", wires=3)
params = [1.0, 1.0, 1.0]
with JacobianTape() as tape:
qml.RX(params[0], wires=[0])
qml.RY(params[1], wires=[1])
qml.RZ(params[2], wires=[2])
qml.CNOT(wires=[0, 1])
qml.probs(wires=0)
qml.probs(wires=[1, 2])
# inferred output dim should be correct
assert tape.output_dim == sum([2, 4])
# modify the output dim
tape._output_dim = 2
res = tape.jacobian(dev, order=2, method="numeric")
# output dim should be correct
assert tape.output_dim == sum([2, 4])
assert res.shape == (6, 3)
def test_independent_parameter(self, mocker):
"""Test that an independent parameter is skipped
during the Jacobian computation."""
numeric_spy = mocker.spy(JacobianTape, "numeric_pd")
analytic_spy = mocker.spy(JacobianTape, "analytic_pd")
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit", wires=2)
res = tape.jacobian(dev)
assert res.shape == (1, 2)
# the numeric pd method is only called once
assert len(numeric_spy.call_args_list) == 1
# analytic pd should not be called at all
assert len(analytic_spy.call_args_list) == 0
# the numeric pd method is only called for parameter 0
assert numeric_spy.call_args[0] == (tape, 0)
def test_no_trainable_parameters(self, mocker):
"""Test that if the tape has no trainable parameters, no
subroutines are called and the returned Jacobian is empty"""
numeric_spy = mocker.spy(JacobianTape, "numeric_pd")
analytic_spy = mocker.spy(JacobianTape, "analytic_pd")
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit", wires=2)
tape.trainable_params = {}
res = tape.jacobian(dev)
assert res.size == 0
assert np.all(res == np.array([[]]))
numeric_spy.assert_not_called()
analytic_spy.assert_not_called()
def test_y0(self, mocker):
"""Test that if first order finite differences is used, then
the tape is executed only once using the current parameter
values."""
dev = qml.device("default.qubit", wires=2)
execute_spy = mocker.spy(dev, "execute")
numeric_spy = mocker.spy(JacobianTape, "numeric_pd")
with JacobianTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[0])
qml.expval(qml.PauliZ(0))
res = tape.jacobian(dev, order=1)
# the execute device method is called once per parameter,
# plus one global call
assert len(execute_spy.call_args_list) == tape.num_params + 1
assert "y0" in numeric_spy.call_args_list[0][1]
assert "y0" in numeric_spy.call_args_list[1][1]
def test_parameters(self, tol):
"""Test Jacobian computation works when parameters are both passed and not passed."""
dev = qml.device("default.qubit", wires=2)
params = [0.1, 0.2]
with JacobianTape() as tape:
qml.RX(params[0], wires=[0])
qml.RY(params[1], wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
# test Jacobian with no parameters
res1 = tape.jacobian(dev)
assert tape.get_parameters() == params
# test Jacobian with parameters
res2 = tape.jacobian(dev, params=[0.5, 0.6])
assert tape.get_parameters() == params
# test setting parameters
tape.set_parameters(params=[0.5, 0.6])
res3 = tape.jacobian(dev)
assert np.allclose(res2, res3, atol=tol, rtol=0)
assert not np.allclose(res1, res2, atol=tol, rtol=0)
assert tape.get_parameters() == [0.5, 0.6]
def test_numeric_pd_no_y0(self, tol):
"""Test that, if y0 is not passed when calling the numeric_pd method,
y0 is calculated."""
dev = qml.device("default.qubit", wires=2)
params = [0.1, 0.2]
with JacobianTape() as tape:
qml.RX(params[0], wires=[0])
qml.RY(params[1], wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
# compute numeric gradient of parameter 0, without passing y0
tapes, fn = tape.numeric_pd(0)
assert len(tapes) == 2
res1 = fn([tape.execute(dev) for tape in tapes])
# compute y0 in advance
y0 = tape.execute(dev)
tapes, fn = tape.numeric_pd(0, y0=y0)
assert len(tapes) == 1
res2 = fn([tape.execute(dev) for tape in tapes])
assert np.allclose(res1, res2, atol=tol, rtol=0)
def test_numeric_unknown_order(self):
"""Test that an exception is raised if the finite-difference
order is not supported"""
dev = qml.device("default.qubit", wires=2)
params = [0.1, 0.2]
with JacobianTape() as tape:
qml.RX(1, wires=[0])
qml.RY(1, wires=[1])
qml.RZ(1, wires=[2])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1) @ qml.PauliZ(2))
with pytest.raises(ValueError, match="Order must be 1 or 2"):
tape.jacobian(dev, order=3)
def test_independent_parameters(self):
"""Test the case where expectation values are independent of some parameters. For those
parameters, the gradient should be evaluated to zero without executing the device."""
dev = qml.device("default.qubit", wires=2)
with JacobianTape() as tape1:
qml.RX(1, wires=[0])
qml.RX(1, wires=[1])
qml.expval(qml.PauliZ(0))
with JacobianTape() as tape2:
qml.RX(1, wires=[0])
qml.RX(1, wires=[1])
qml.expval(qml.PauliZ(1))
j1 = tape1.jacobian(dev)
# We should only be executing the device to differentiate 1 parameter (2 executions)
assert dev.num_executions == 2
j2 = tape2.jacobian(dev)
exp = - np.sin(1)
assert np.allclose(j1, [exp, 0])
assert np.allclose(j2, [0, exp])
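
# For orientation (not part of the original test suite): the first-order
# forward-difference rule these tests exercise reduces to
#
#     def forward_diff(f, params, i, h=1e-7):
#         shifted = list(params)
#         shifted[i] += h
#         return (f(shifted) - f(params)) / h
#
# which is why order=1 (see test_y0 above) costs one device execution per
# trainable parameter plus a single execution at the unshifted parameters.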
class TestJacobianIntegration:
"""Integration tests for the Jacobian method"""
def test_ragged_output(self):
"""Test that the Jacobian is correctly returned for a tape
with ragged output"""
dev = qml.device("default.qubit", wires=3)
params = [1.0, 1.0, 1.0]
with JacobianTape() as tape:
qml.RX(params[0], wires=[0])
qml.RY(params[1], wires=[1])
qml.RZ(params[2], wires=[2])
qml.CNOT(wires=[0, 1])
qml.probs(wires=0)
qml.probs(wires=[1, 2])
res = tape.jacobian(dev)
assert res.shape == (6, 3)
def test_single_expectation_value(self, tol):
"""Tests correct output shape and evaluation for a tape
with a single expval output"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with JacobianTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
res = tape.jacobian(dev)
assert res.shape == (1, 2)
expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_multiple_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with multiple expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with JacobianTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliX(1))
res = tape.jacobian(dev)
assert res.shape == (2, 2)
expected = np.array([[-np.sin(x), 0], [0, np.cos(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_var_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with expval and var outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with JacobianTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.var(qml.PauliX(1))
res = tape.jacobian(dev)
assert res.shape == (2, 2)
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_prob_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with JacobianTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[0, 1])
res = tape.jacobian(dev)
assert res.shape == (5, 2)
expected = (
np.array(
[
[-2 * np.sin(x), 0],
[
-(np.cos(y / 2) ** 2 * np.sin(x)),
-(np.cos(x / 2) ** 2 * np.sin(y)),
],
[
-(np.sin(x) * np.sin(y / 2) ** 2),
                        (np.cos(x / 2) ** 2 * np.sin(y)),
import os
import re
import numpy as np
import scipy.io as sio
from scipy.fftpack import fft
import pandas as pd
from .movie import Movie, FullFieldFlashMovie
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', 100)
#################################################
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
##################################################
def compute_FFT_OneCycle(FR, TF, downsample):
    one_cyc = int((1000. / downsample) / TF)  # builtin int; np.int is deprecated
FR_cyc = list(chunks(FR, one_cyc))
if (TF == 15. or TF == 8.):
FR_cyc = FR_cyc[:-1]
FR_cyc_avg = np.mean(FR_cyc, axis=0)
y = FR_cyc_avg
AMP = 2 * np.abs(fft(y) / len(y))
F0 = 0.5 * AMP[0]
    assert abs(F0 - np.mean(y)) < 1.e-4
F1 = AMP[1]
return F0, F1
##################################################
def create_ff_mov(frame_rate, tst, tend, xrng, yrng):
ff_mov_on = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend, frame_rate=frame_rate,
max_intensity=1).full(t_max=tend) # +0.5)
    # off-flash mirrors the on-flash call above (the original line is truncated here)
    ff_mov_off = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend, frame_rate=frame_rate,
                                     max_intensity=-1).full(t_max=tend)
    return ff_mov_on, ff_mov_off
# -*- coding: utf-8 -*-
"""HousingModel
Main model class
"""
import time
import numpy as np
import pandas as pd
# consav package
from consav import ModelClass
from consav.misc import elapsed, nonlinspace, markov_rouwenhorst
# local
import post_decision
import negm
import nvfi
import simulate
def solve_model(model,t_min=0,do_print=True):
""" solve the model """
par = model.par
sol = model.sol
t0_outer = time.time()
# a. re-set up grids
t0 = time.time()
model.create_grids()
if do_print: print(f'setup grids in {elapsed(t0)}')
    # b. time loop
for t in reversed(range(t_min,par.T)):
t0 = time.time()
        # i. post-decisions
t0_pd = time.time()
post_decision.compute_wq_renters(par,sol,t)
post_decision.compute_wq_owners(par,sol,t)
t_pd = elapsed(t0_pd)
# ii. negm
t0_negm = time.time()
negm.solve_renters(par,sol,t)
negm.solve_owners(par,sol,t)
t_negm = elapsed(t0_negm)
# iii. evaluate values of each discrete choice
t0_evaluate = time.time()
nvfi.evaluate_rt(par,sol,t)
nvfi.evaluate_ft(par,sol,t)
nvfi.evaluate_bt(par,sol,t)
t_evaluate = elapsed(t0_evaluate)
# iv. final nvfi
t0_nvfi = time.time()
nvfi.solve_renters(par,sol,t)
t_nvfi_r = elapsed(t0_nvfi)
t0_nvfi = time.time()
nvfi.solve_owners(par,sol,t)
t_nvfi_o = elapsed(t0_nvfi)
if do_print:
msg = f't = {t:2d} solved in {elapsed(t0)}'
msg += f'[pd: {t_pd}, negm: {t_negm}, evaluate: {t_evaluate}, nvfi_r: {t_nvfi_r}, nvfi_o: {t_nvfi_o}]'
print(msg)
if do_print: print(f'model solved in {elapsed(t0_outer)}')
def simulate_model(model,do_print=True,seed=1986):
""" simulate the model """
if not seed is None: np.random.seed(seed)
par = model.par
sol = model.sol
sim = model.sim
t0_outer = time.time()
# a. draw random numbers
sim.i_beta[:] = np.random.choice(par.Nbeta,size=par.simN) # preferences
sim.a0[:] = np.random.gamma(par.a0_shape,par.a0_scale,size=par.simN) # initial assets
sim.pi_p[:] = np.random.uniform(size=(par.simN,par.T)) # income process
sim.pi_c[:] = np.random.uniform(size=(par.simN,par.T)) # discrete choice
# b. simulate
simulate.simulate(par,sol,sim)
if do_print: print(f'model simulated in {elapsed(t0_outer)}')
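
# Typical usage (illustrative; the exact constructor signature follows the
# ModelClass conventions of the consav package and is an assumption here):
#
#     model = HousingModelClass(name='baseline')
#     model.par.simN = 10_000     # smaller simulation for a quick run
#     solve_model(model, do_print=True)
#     simulate_model(model, seed=1986)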
# class
class HousingModelClass(ModelClass):
def setup(self):
""" set baseline parameters in .par """
par = self.par
# specify list over parameters, which are allowed not to be floats
self.not_float_list = ['T','TR','age_min','t_min','Delta_iota',
'Nbeta','Na','Niota','Nh','Nht','Np','NLTV','Nm','Nx','Nz',
'Nrt','Nbt','Nft','Nkt','Ncr','Nco','do_sim','simN']
# a. economic parameters
# life-cycle
par.T = 55 # life-span from age_min
par.TR = 37 # working-life-span from age_min
par.age_min = 25 # only used in figures
par.t_min = 0 # used when solving
# income
par.rho_p = 0.99 # persistence of income shocks
par.sigma_p = 0.30 # std. of income shocks
par.G = np.ones(par.T) # age-specific growth factors of income
par.G[:20] = 1.066
par.G[20:par.TR] = 1.015
par.G[par.TR:] = 0.96
par.retirement_drop = 1.00 # drop in income at retirement
# assets and housing
par.ra = 0.035 # return on liquid assets
par.rm = 0.040 # mortgage interest rate
par.rb = 0.070 # bank loan interest rate
par.ph = 1.000 # housing price
par.rh = 0.045 # rental price
par.delta = 0.0075 # mortgage interest only spread
par.gamma_m = 0.050 # mortgage repayment rate
par.gamma_b = 0.100 # bank loan repayment rate
par.tau_f = 0.100 # loan refinancing cost
par.tau_h = 0.200 # moving-in cost for owners
par.tau_ht = 0.010 # moving-in cost for renters
par.kappa_p = 4.00 # loan-to-income ratio
par.kappa_h = 0.95 # loan-to-value ratio
par.kappa_h_mortgage = 0.80 # loan-to-value ratio (mortgage)
par.grid_h = np.array([2.0,4.0,6.0,8.0,10.0,15.0,20.0,25.0,30.0,35.0],dtype=np.float_) # housing choices
par.grid_ht = par.grid_h.copy()
par.Niota = 2 # maximum interest only period
par.Delta_iota = 0 # = 0 permanent interest only possible, else = 1
# preferences
par.beta_mean = 0.96
par.beta_low = 0.85
par.beta_high = 0.99
par.rho = 2.0 # CRRA parameter
par.nu = 20.0 # bequest utility multiplicative scaling
par.zeta = 8.0 # bequest utility additive scaling
par.alpha = 0.70 # non-durable weight
par.omega = 1.20 # homeowner bonus
par.sigma = 0.025 # smoothing
par.sigma_agg = 0.050 # smoothing
# b. computational parameters
par.Nbeta = 3 # grid for beta
par.Np = 7 # grid for p
par.NLTV = 20 # grid for LTV
par.LTV_phi = 1.0 # 1 -> equally spaced, > 1 more points closer to kappa_p
par.Na = 100 # grid for a
par.a_min = 0.0
par.a_max = 50.0
par.a_phi = 1.25 # 1 -> equally spaced, > 1 more points closer to min
par.Nx = 200 # grid for x
par.x_min = 0.0
par.x_max = 80.0
par.x_phi = 1.25 # 1 -> equally spaced, > 1 more points closer to min
par.Nz = 200 # grid for z
par.z_min = 0.0
par.z_max = 50.0
par.z_phi = 1.25 # 1 -> equally spaced, > 1 more points closer to min
# c. simulation parameters
par.do_sim = True
par.a0_shape = 0.1
par.a0_scale = 5.0
par.simN = 100_000
def create_grids(self):
""" create grids """
par = self.par
assert par.Delta_iota in [0,1]
# a. states
if par.Nbeta == 1:
par.grid_beta = np.array([par.beta_mean])
else:
par.grid_beta = np.array([par.beta_low,par.beta_mean,par.beta_high])
assert par.Nbeta == par.grid_beta.size
par.grid_LTV = np.flip(par.kappa_h-nonlinspace(0.0,par.kappa_h,par.NLTV,par.LTV_phi))
par.grid_a = nonlinspace(par.a_min,par.a_max,par.Na,par.a_phi)
par.grid_z = nonlinspace(0,par.z_max,par.Nz,par.z_phi)
par.grid_x = nonlinspace(0,par.x_max,par.Nx,par.x_phi)
# inferred size of housing grids
par.Nh = par.grid_h.size # owners
par.Nht = par.grid_ht.size # renters
        # inferred number of discrete choices
par.Nrt = par.Nht # number of choices for renters
par.Nkt = 1 # number of choices for keepers
par.Nft = par.Niota*par.NLTV # number of choices for refinancers
par.Nbt = par.Niota*par.Nh*par.NLTV # number of choices for buyers
par.Ncr = par.Nht + par.Nbt # number of choices for lagged renters
par.Nco = par.Nht + par.Nbt + par.Nft + par.Nkt # number of choices for lagged owners
# renters
par.r_i_ht = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_iota = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_i_h = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_i_LTV = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_d = -1*np.ones(par.Ncr,dtype=np.int_)
# owners
par.o_i_ht = -1*np.ones(par.Nco,dtype=np.int_)
par.o_iota = -1*np.ones(par.Nco,dtype=np.int_)
par.o_i_h = -1*np.ones(par.Nco,dtype=np.int_)
par.o_i_LTV = -1*np.ones(par.Nco,dtype=np.int_)
par.o_d = -1*np.ones(par.Nco,dtype=np.int_)
# rt
i = 0
for i_ht in range(par.Nht):
par.r_i_ht[i] = i_ht
par.r_d[i] = 0
par.o_i_ht[i] = i_ht
par.o_d[i] = 0
i += 1
# bt
for iota in range(par.Niota):
for i_h in range(par.Nh):
for i_LTV in range(par.NLTV):
par.r_iota[i] = iota
par.r_i_h[i] = i_h
par.r_i_LTV[i] = i_LTV
par.r_d[i] = 1
par.o_iota[i] = iota
par.o_i_h[i] = i_h
par.o_i_LTV[i] = i_LTV
par.o_d[i] = 1
i += 1
# ft
for iota in range(par.Niota):
for i_LTV in range(par.NLTV):
par.o_iota[i] = iota
par.o_i_LTV[i] = i_LTV
par.o_d[i] = 2
i += 1
# kt
par.o_d[i] = 3
# b. income
out_ = markov_rouwenhorst(par.rho_p,par.sigma_p,par.Np)
par.grid_p, par.trans_p, par.ergodic_p, par.trans_cs_p, par.ergodic_cs_p = out_
par.Gamma = np.empty(par.T)
for t in range(par.T):
if t == 0: par.Gamma[t] = 1
else: par.Gamma[t] = par.G[t]*par.Gamma[t-1]
if t == par.TR: par.Gamma[t] *= par.retirement_drop
def allocate(self):
""" create grids and allocate memory for .par, .sol and .sim """
par = self.par
sol = self.sol
sim = self.sim
# a. parameters
self.create_grids()
# b. solution
# post-decison
post_r_shape = (par.T,par.Nbeta,par.Nht,par.Np,par.Na)
sol.r_q = np.nan*np.ones(post_r_shape)
sol.r_w = np.nan*np.ones(post_r_shape)
sol.r_inv_w = np.nan*np.ones(post_r_shape)
post_o_shape = (par.T,par.Nbeta,par.Niota,par.Nh,par.Np,par.NLTV,par.Na)
sol.o_q = np.nan*np.ones(post_o_shape)
sol.o_w = np.nan*np.ones(post_o_shape)
sol.o_inv_w = np.nan*np.ones(post_o_shape)
# consumption
negm_r_shape = (par.T,par.Nbeta,par.Nht,par.Np,par.Nz)
sol.r_inv_vbar = np.nan*np.ones(negm_r_shape)
sol.r_inv_mubar = np.nan*np.ones(negm_r_shape)
sol.r_cbar = np.nan*np.ones(negm_r_shape)
negm_o_shape = (par.T,par.Nbeta,par.Niota,par.Nh,par.Np,par.NLTV,par.Nz)
sol.o_inv_vbar = np.nan*np.ones(negm_o_shape)
sol.o_inv_mubar = np.nan*np.ones(negm_o_shape)
sol.o_cbar = np.nan*np.ones(negm_o_shape)
# intermediary
rt_shape = (par.T,par.Nbeta,par.Nht+1,par.Np,par.Nx,par.Nht)
sol.rt_inv_v = np.nan*np.ones(rt_shape)
        sol.rt_inv_mu = np.nan*np.ones(rt_shape)
#!/usr/bin/env python3
#<NAME> 2019
import numpy as np
import pandas as pd
import matplotlib
import os
if os.getlogin() == 'ojh251':
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from astropy import units as u
import lightkurve
from scipy.special import legendre as P
from scipy.special import factorial as fct
from omnitool import literature_values as lv
plt.style.use(lightkurve.MPLSTYLE)
import argparse
parser = argparse.ArgumentParser(description='Generate a model of 16 Cyg A')
parser.add_argument('-n', '--noise', action='store_const',
const=False, default=True, help='Turn off Chi-Sqr 2 d.o.f. noise')
parser.add_argument('-b', '--background', action='store_const', const=False,
default=True, help='Turn off Harvey Profile background')
parser.add_argument('-a', '--apodization', action='store_const', const=False,
default=True, help='Turn off apodization')
parser.add_argument('years', default = 4., type=float, help='How many years worth of data')
parser.add_argument('-s','--save',action='store_const',const=True,
default=False, help='Save output.')
args = parser.parse_args()
class star():
def __init__(self, freqs, nyquist, numax, dnu, d02, nus, i):
'''A class model that stores the basic stellar properties'''
self.freqs = freqs
self.nyquist = nyquist
self.numax = numax
self.dnu = dnu
self.d02 = d02
self.epsilon = 0.601 + 0.632*np.log(self.dnu) #from Vrard et al. 2015 (for RGB)
self.nmax = self.numax/self.dnu - self.epsilon #from Vrard et al. 2015
self.lmax = 3 #Don't care about higher order
self.nus = nus #Depends on rotation & coriolis force (which I don't understand yet)
self.i = i #Determines the mode height
self.snr = 10.
def get_Hn(self, n):
#The height of the l=0 mode for a given n.
#These I will draw from a Gaussian with a given FWHM, as they depend on SNR
nun0 = self.asymodelocs(n, 0, 0)
hmax=self.snr*1.4
#I modulate the mode height based on a fudged estimate of the FWHM
fwhm = 0.25*self.numax #From LEGACY
std = fwhm / (2*np.sqrt(2*np.log(2)))
Hn = hmax * np.exp(-0.5 * (nun0 - self.numax)**2 / std**2)
return Hn
def get_Epsilonlm(self, i, l, m):
        #I use the prescriptions from Gizon & Solanki 2003 and Handberg & Campante 2012
if l == 0:
return 1
if l == 1:
if m == 0:
return np.cos(i)**2
            if np.abs(m) == 1:
                return 0.5 * np.sin(i)**2
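        # The l = 2 visibilities from the same reference (Gizon & Solanki 2003)
        # would continue the pattern; sketched here as an assumption, since the
        # original file is truncated at this point:
        #
        #     if l == 2:
        #         if m == 0:
        #             return 0.25 * (3. * np.cos(i)**2 - 1.)**2
        #         if np.abs(m) == 1:
        #             return (3. / 8.) * np.sin(2. * i)**2
        #         if np.abs(m) == 2:
        #             return (3. / 8.) * np.sin(i)**4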
import numpy as np
from tqdm import tqdm
import utils.helper as hlp
def slidewindow(ts, horizon=.2, stride=0.2):
xf = []
yf = []
for i in range(0, ts.shape[0], int(stride * ts.shape[0])):
horizon1 = int(horizon * ts.shape[0])
if (i + horizon1 + horizon1 <= ts.shape[0]):
xf.append(ts[i:i + horizon1,0])
yf.append(ts[i + horizon1:i + horizon1 + horizon1, 0])
xf = np.asarray(xf)
yf = np.asarray(yf)
return xf, yf
def cutPF(ts, perc=.5):
seq_len = ts.shape[0]
new_ts = ts.copy()
t=int(perc*seq_len)
return new_ts[:t, ...], new_ts[t:, ...]
def cutout(ts, perc=.1):
seq_len = ts.shape[0]
new_ts = ts.copy()
win_len = int(perc * seq_len)
start = np.random.randint(0, seq_len-win_len-1)
end = start + win_len
start = max(0, start)
end = min(end, seq_len)
# print("[INFO] start={}, end={}".format(start, end))
new_ts[start:end, ...] = 0
# return new_ts, ts[start:end, ...]
return new_ts
def cut_piece2C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len/(2*2)
if perc<1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len-win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1-start2)<(win_class):
label=0
else:
label=1
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece3C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len/(2*3)
if perc<1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len-win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1-start2)<(win_class):
label=0
elif abs(start1-start2)<(2*win_class):
label=1
else:
label=2
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece4C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 4)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
else:
label = 3
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece5C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 5)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
else:
label = 4
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece6C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 6)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
else:
label = 5
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece7C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 7)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
elif abs(start1 - start2) < (6 * win_class):
label = 5
else:
label = 6
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece8C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 8)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
elif abs(start1 - start2) < (6 * win_class):
label = 5
elif abs(start1 - start2) < (7 * win_class):
label = 6
else:
label = 7
return ts[start1:end1, ...], ts[start2:end2, ...], label
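
# The cut_piece2C ... cut_piece8C variants above differ only in the number of
# distance classes. A hypothetical generic form (not part of the original
# module) that reproduces any of them:
def cut_pieceNC(ts, n_classes, perc=.1):
    seq_len = ts.shape[0]
    win_class = seq_len / (2 * n_classes)
    win_len = int(perc * seq_len) if perc < 1 else perc
    start1 = np.random.randint(0, seq_len - win_len)
    start2 = np.random.randint(0, seq_len - win_len)
    # label = number of win_class-sized bins separating the two starts,
    # capped at n_classes - 1 (same banding as the unrolled variants)
    label = min(int(abs(start1 - start2) // win_class), n_classes - 1)
    return ts[start1:start1 + win_len, ...], ts[start2:start2 + win_len, ...], label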
def jitter(x, sigma=0.03):
# https://arxiv.org/pdf/1706.00527.pdf
return x + np.random.normal(loc=0., scale=sigma, size=x.shape)
def scaling(x, sigma=0.1):
# https://arxiv.org/pdf/1706.00527.pdf
factor = np.random.normal(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))
return np.multiply(x, factor[:,np.newaxis,:])
def rotation(x):
flip = np.random.choice([-1, 1], size=(x.shape[0],x.shape[2]))
rotate_axis = np.arange(x.shape[2])
np.random.shuffle(rotate_axis)
return flip[:,np.newaxis,:] * x[:,:,rotate_axis]
def scaling_s(x, sigma=0.1, plot=False):
# https://arxiv.org/pdf/1706.00527.pdf
factor = np.random.normal(loc=1., scale=sigma, size=(1, x.shape[1]))
x_ = np.multiply(x, factor[:, :])
if plot:
hlp.plot1d(x, x_, save_file='aug_examples/scal.png')
return x_
def rotation_s(x, plot=False):
flip = np.random.choice([-1], size=(1, x.shape[1]))
rotate_axis = np.arange(x.shape[1])
np.random.shuffle(rotate_axis)
x_ = flip[:, :] * x[:, rotate_axis]
if plot:
hlp.plot1d(x, x_, save_file='aug_examples/rotation_s.png')
return x_
def rotation2d(x, sigma=0.2):
thetas = np.random.normal(loc=0, scale=sigma, size=(x.shape[0]))
c = np.cos(thetas)
s = np.sin(thetas)
ret = np.zeros_like(x)
for i, pat in enumerate(x):
rot = np.array(((c[i], -s[i]), (s[i], c[i])))
ret[i] = np.dot(pat, rot)
return ret
def permutation(x, max_segments=5, seg_mode="equal"):
orig_steps = np.arange(x.shape[1])
num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))
ret = np.zeros_like(x)
for i, pat in enumerate(x):
if num_segs[i] > 1:
if seg_mode == "random":
split_points = np.random.choice(x.shape[1]-2, num_segs[i]-1, replace=False)
split_points.sort()
splits = np.split(orig_steps, split_points)
else:
splits = np.array_split(orig_steps, num_segs[i])
warp = np.concatenate(np.random.permutation(splits)).ravel()
ret[i] = pat[warp]
else:
ret[i] = pat
return ret
def magnitude_warp(x, sigma=0.2, knot=4):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[1])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))
warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T
ret = np.zeros_like(x)
for i, pat in enumerate(x):
li = []
for dim in range(x.shape[2]):
li.append(CubicSpline(warp_steps[:, dim], random_warps[i, :, dim])(orig_steps))
warper = np.array(li).T
ret[i] = pat * warper
return ret
def magnitude_warp_s(x, sigma=0.2, knot=4, plot=False):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[0])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(1, knot + 2, x.shape[1]))
warp_steps = (np.ones((x.shape[1], 1)) * (np.linspace(0, x.shape[0] - 1., num=knot + 2))).T
li = []
for dim in range(x.shape[1]):
li.append(CubicSpline(warp_steps[:, dim], random_warps[0, :, dim])(orig_steps))
warper = np.array(li).T
x_ = x * warper
if plot:
hlp.plot1d(x, x_, save_file='aug_examples/magnitude_warp_s.png')
return x_
def time_warp(x, sigma=0.2, knot=4):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[1])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))
warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T
ret = np.zeros_like(x)
for i, pat in enumerate(x):
for dim in range(x.shape[2]):
time_warp = CubicSpline(warp_steps[:,dim], warp_steps[:,dim] * random_warps[i,:,dim])(orig_steps)
scale = (x.shape[1]-1)/time_warp[-1]
ret[i,:,dim] = np.interp(orig_steps, np.clip(scale*time_warp, 0, x.shape[1]-1), pat[:,dim]).T
return ret
def time_warp_s(x, sigma=0.2, knot=4, plot=False):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(x.shape[0])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(1, knot + 2, x.shape[1]))
warp_steps = (np.ones((x.shape[1], 1)) * (np.linspace(0, x.shape[0] - 1., num=knot + 2))).T
ret = np.zeros_like(x)
for dim in range(x.shape[1]):
time_warp = CubicSpline(warp_steps[:, dim],
warp_steps[:, dim] * random_warps[0, :, dim])(orig_steps)
scale = (x.shape[0] - 1) / time_warp[-1]
ret[:, dim] = np.interp(orig_steps, np.clip(scale * time_warp, 0, x.shape[0] - 1),
x[:, dim]).T
if plot:
hlp.plot1d(x, ret, save_file='aug_examples/time_warp_s.png')
return ret
def window_slice(x, reduce_ratio=0.9):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
target_len = np.ceil(reduce_ratio*x.shape[1]).astype(int)
if target_len >= x.shape[1]:
return x
starts = np.random.randint(low=0, high=x.shape[1]-target_len, size=(x.shape[0])).astype(int)
ends = (target_len + starts).astype(int)
ret = np.zeros_like(x)
for i, pat in enumerate(x):
for dim in range(x.shape[2]):
ret[i,:,dim] = np.interp(np.linspace(0, target_len, num=x.shape[1]), np.arange(target_len), pat[starts[i]:ends[i],dim]).T
return ret
def window_slice_s(x, reduce_ratio=0.9):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
target_len = np.ceil(reduce_ratio * x.shape[0]).astype(int)
if target_len >= x.shape[0]:
return x
starts = np.random.randint(low=0, high=x.shape[0] - target_len, size=(1)).astype(int)
ends = (target_len + starts).astype(int)
ret = np.zeros_like(x)
for dim in range(x.shape[1]):
ret[:, dim] = np.interp(np.linspace(0, target_len, num=x.shape[0]), np.arange(target_len),
x[starts[0]:ends[0], dim]).T
return ret
def window_warp(x, window_ratio=0.1, scales=[0.5, 2.]):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
warp_scales = np.random.choice(scales, x.shape[0])
warp_size = np.ceil(window_ratio*x.shape[1]).astype(int)
window_steps = np.arange(warp_size)
window_starts = np.random.randint(low=1, high=x.shape[1]-warp_size-1, size=(x.shape[0])).astype(int)
window_ends = (window_starts + warp_size).astype(int)
ret = np.zeros_like(x)
for i, pat in enumerate(x):
for dim in range(x.shape[2]):
start_seg = pat[:window_starts[i],dim]
window_seg = np.interp(np.linspace(0, warp_size-1, num=int(warp_size*warp_scales[i])), window_steps, pat[window_starts[i]:window_ends[i],dim])
end_seg = pat[window_ends[i]:,dim]
warped = np.concatenate((start_seg, window_seg, end_seg))
ret[i,:,dim] = np.interp(np.arange(x.shape[1]), np.linspace(0, x.shape[1]-1., num=warped.size), warped).T
return ret
def window_warp_s(x, window_ratio=0.1, scales=[0.5, 2.]):
# https://halshs.archives-ouvertes.fr/halshs-01357973/document
warp_scales = np.random.choice(scales, 1)
warp_size = np.ceil(window_ratio * x.shape[0]).astype(int)
window_steps = np.arange(warp_size)
window_starts = np.random.randint(low=1, high=x.shape[0] - warp_size - 1, size=(1)).astype(int)
window_ends = (window_starts + warp_size).astype(int)
ret = np.zeros_like(x)
pat=x
for dim in range(x.shape[1]):
start_seg = pat[:window_starts[0], dim]
window_seg = np.interp(np.linspace(0, warp_size - 1,
num=int(warp_size * warp_scales[0])), window_steps,
pat[window_starts[0]:window_ends[0], dim])
end_seg = pat[window_ends[0]:, dim]
warped = np.concatenate((start_seg, window_seg, end_seg))
ret[:, dim] = np.interp(np.arange(x.shape[0]), np.linspace(0, x.shape[0] - 1., num=warped.size),
warped).T
return ret
def spawner(x, labels, sigma=0.05, verbose=0):
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6983028/
import utils.dtw as dtw
random_points = np.random.randint(low=1, high=x.shape[1]-1, size=x.shape[0])
window = np.ceil(x.shape[1] / 10.).astype(int)
orig_steps = np.arange(x.shape[1])
l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels
ret = np.zeros_like(x)
for i, pat in enumerate(tqdm(x)):
        # guarantees that the same one isn't selected
choices = np.delete(np.arange(x.shape[0]), i)
# remove ones of different classes
choices = np.where(l[choices] == l[i])[0]
if choices.size > 0:
random_sample = x[np.random.choice(choices)]
# SPAWNER splits the path into two randomly
path1 = dtw.dtw(pat[:random_points[i]], random_sample[:random_points[i]], dtw.RETURN_PATH, slope_constraint="symmetric", window=window)
path2 = dtw.dtw(pat[random_points[i]:], random_sample[random_points[i]:], dtw.RETURN_PATH, slope_constraint="symmetric", window=window)
combined = np.concatenate((np.vstack(path1), np.vstack(path2+random_points[i])), axis=1)
if verbose:
print(random_points[i])
dtw_value, cost, DTW_map, path = dtw.dtw(pat, random_sample,
return_flag = dtw.RETURN_ALL,
                                                         slope_constraint="symmetric",
window=window)
dtw.draw_graph1d(cost, DTW_map, path, pat, random_sample)
dtw.draw_graph1d(cost, DTW_map, combined, pat, random_sample)
mean = np.mean([pat[combined[0]], random_sample[combined[1]]], axis=0)
for dim in range(x.shape[2]):
ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=mean.shape[0]), mean[:,dim]).T
else:
print("There is only one pattern of class %d, skipping pattern average"%l[i])
ret[i,:] = pat
return jitter(ret, sigma=sigma)
def wdba(x, labels, batch_size=6, slope_constraint="symmetric", use_window=True):
# https://ieeexplore.ieee.org/document/8215569
import utils.dtw as dtw
if use_window:
window = np.ceil(x.shape[1] / 10.).astype(int)
else:
window = None
orig_steps = np.arange(x.shape[1])
l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels
ret = np.zeros_like(x)
for i in tqdm(range(ret.shape[0])):
# get the same class as i
choices = np.where(l == l[i])[0]
if choices.size > 0:
# pick random intra-class pattern
k = min(choices.size, batch_size)
random_prototypes = x[np.random.choice(choices, k, replace=False)]
# calculate dtw between all
dtw_matrix = np.zeros((k, k))
for p, prototype in enumerate(random_prototypes):
for s, sample in enumerate(random_prototypes):
if p == s:
dtw_matrix[p, s] = 0.
else:
dtw_matrix[p, s] = dtw.dtw(prototype, sample, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)
# get medoid
medoid_id = np.argsort(np.sum(dtw_matrix, axis=1))[0]
nearest_order = np.argsort(dtw_matrix[medoid_id])
medoid_pattern = random_prototypes[medoid_id]
# start weighted DBA
average_pattern = np.zeros_like(medoid_pattern)
weighted_sums = np.zeros((medoid_pattern.shape[0]))
for nid in nearest_order:
if nid == medoid_id or dtw_matrix[medoid_id, nearest_order[1]] == 0.:
average_pattern += medoid_pattern
weighted_sums += np.ones_like(weighted_sums)
else:
path = dtw.dtw(medoid_pattern, random_prototypes[nid], dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)
dtw_value = dtw_matrix[medoid_id, nid]
warped = random_prototypes[nid, path[1]]
weight = np.exp(np.log(0.5)*dtw_value/dtw_matrix[medoid_id, nearest_order[1]])
average_pattern[path[0]] += weight * warped
weighted_sums[path[0]] += weight
ret[i,:] = average_pattern / weighted_sums[:,np.newaxis]
else:
print("There is only one pattern of class %d, skipping pattern average"%l[i])
ret[i,:] = x[i]
return ret
# Proposed
def random_guided_warp(x, labels, slope_constraint="symmetric", use_window=True, dtw_type="normal"):
import utils.dtw as dtw
if use_window:
window = np.ceil(x.shape[1] / 10.).astype(int)
else:
window = None
orig_steps = np.arange(x.shape[1])
l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels
ret = np.zeros_like(x)
for i, pat in enumerate(tqdm(x)):
        # guarantees that the same one isn't selected
choices = np.delete(np.arange(x.shape[0]), i)
# remove ones of different classes
choices = np.where(l[choices] == l[i])[0]
if choices.size > 0:
# pick random intra-class pattern
            random_prototype = x[np.random.choice(choices)]
"""
Class for generation and management of synthetic single-layer networks according to the XOR model.
It assumes a mixed effect of the community and hierarchical latent structures.
Possible options: model with s permuted, model with s not permuted.
"""
import math
import warnings
import numpy as np
import pandas as pd
import networkx as nx
import scipy.sparse as sparse
from numba import jit
class SyntNetXOR(object):
def __init__(self, m=1, N=100, K=3, l=1, prng=42, avg_degree=10., mu=0.5, structure='assortative', label='test',
beta=1e4, gamma=0.5, delta0=0.01, eta=0.5, ag=0.6, bg=1., corr=0., over=0., means=(), stds=(),
verbose=0, folder='../../data/input', L1=False, output_parameters=False, output_adj=False,
outfile_adj='None', use_leagues=False, permute=True):
self.N = N # network size (node number)
self.m = m # number of networks to be generated
self.prng = prng # seed random number generator
self.label = label # label (associated uniquely with the set of inputs)
self.folder = folder # input data folder path
self.output_parameters = output_parameters # flag for storing the parameters
self.output_adj = output_adj # flag for storing the generated adjacency matrix
self.outfile_adj = outfile_adj # name for saving the adjacency matrix
self.avg_degree = avg_degree # required average degree
self.delta0 = delta0 # outgroup interaction probability
self.permute = permute # flag for permuting s variables (not overlapping option)
        if verbose not in {0, 1, 2}:
raise ValueError('The verbosity parameter can only assume values in {0,1,2}!')
self.verbose = verbose # verbosity flag
if mu < 0 or mu > 1:
raise ValueError('The Binomial parameter mu has to be in [0, 1]!')
if mu == 1: mu = 1 - 1e-13
if mu == 0: mu = 1e-13
self.mu = mu # sigma latent variable a prior mean
''' Community-related inputs '''
if structure not in ['assortative', 'disassortative', 'core-periphery', 'directed-biased']:
raise ValueError('The available structures for the affinity matrix w '
'are: assortative, disassortative, core-periphery '
'and directed-biased!')
self.structure = structure # the affinity matrix structure
self.K = K # number of communities
if eta <= 0 and L1:
raise ValueError('The Dirichlet parameter eta has to be positive!')
self.eta = eta # eta parameter of the Dirichlet distribution
if ag <= 0 and not L1:
raise ValueError('The Gamma parameter alpha has to be positive!')
self.ag = ag # alpha parameter of the Gamma distribution
if bg <= 0 and not L1:
raise ValueError('The Gamma parameter beta has to be positive!')
self.bg = bg # beta parameter of the Gamma distribution
self.L1 = L1 # flag for soft u,v generation preference, True -> Dirichlet, False -> Gamma
if (corr < 0) or (corr > 1):
raise ValueError('The correlation parameter has to be in [0, 1]!')
self.corr = corr # correlation between u and v synthetically generated
if (over < 0) or (over > 1):
raise ValueError('The overlapping parameter has to be in [0, 1]!')
self.over = over # fraction of nodes with mixed membership
''' Ranking-related inputs '''
self.use_leagues = use_leagues
if not self.use_leagues:
l = 1
self.l = l # the number of Gaussian for s
if len(means) == self.l:
self.means = means # means for s
else:
self.means = None
if len(stds) == self.l:
self.stds = stds # standard deviations for s
else:
self.stds = None
self.beta = beta # inverse temperature parameter
if gamma <= 0:
raise ValueError('The spring constant gamma has to be positive!')
self.gamma = gamma # spring constant for (s, origin)
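    # Hypothetical usage (illustrative; assumes module-level helpers such as
    # delta_scores are importable alongside this class):
    #
    #     synt = SyntNetXOR(N=200, K=3, avg_degree=12., mu=0.5,
    #                       structure='assortative', prng=42)
    #     G = synt.EitherOr_planted_network()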
def EitherOr_planted_network(self, parameters=None):
"""
Generate a directed, possibly weighted network by using the XOR model.
Steps:
1. Generate or load the latent variables.
2. Extract A_ij entries (network edges) from a combination of Poisson
distributions;
Parameters
----------
parameters : object
Latent variables z, s, u, v and w.
Returns
----------
G : Digraph
DiGraph NetworkX object. Self-loops allowed.
"""
# Set seed random number generator
prng = np.random.RandomState(self.prng)
''' Latent variables '''
if parameters is None:
# Generate latent variables
self.z, self.s, self.u, self.v, self.w, nodes_s = self._generate_lv(prng)
else:
# Set latent variables
self.z, self.s, self.u, self.v, self.w, nodes_s = parameters
k_sr, k_mt, c, eps = 0., 0., 0., 0.
if (self.z == 0).all():
warnings.warn('All Z entries are 0: Generation with MT model.')
self.s = np.zeros(self.N)
S = np.zeros((self.N, self.N))
k_mt = self.avg_degree
else:
# Compute normalization term for c_sr
deltas = delta_scores(self.s)
expH = np.exp(-self.beta * 0.5 * np.power((deltas - 1), 2))
# Compute c_sr
eps = 2 * self.mu * (1-self.mu) * self.delta0 * self.N
k_sr = self.mu * (self.avg_degree - eps) * (self.mu**2 + (1-self.mu)**2)
c = self.N * k_sr / (self.mu * (self.mu**2 + (1-self.mu)**2) * expH.sum())
S = c * expH
if (self.z == 1).all():
warnings.warn('All Z entries are 1: Generation with SR model.')
self.u = np.zeros((self.N, self.K))
self.v = np.zeros((self.N, self.K))
self.w = np.zeros((self.K, self.K))
            M = np.zeros((self.N, self.N))
import numpy
import scipy.misc
import pickle
import datetime
import os
from sklearn import linear_model
from ancestral_atom_learning import AncestralAtomLearning
# from gen_extract_operators import ExtractOperatorsGenerator
from gen_mean_downsampling_operators import gen_extract_operators
from utils.image_patch_utils import gen_patch_2d, restore_2d
def mexh(t, sigma=1.0):
return 2 / (numpy.pi**(1/4) * numpy.sqrt(3*sigma)) * (1-(t/sigma)**2) * numpy.exp(-t**2/(2*sigma**2))
# set parameters for ExtractOperatorsGenerator
ancestor_size = numpy.array([64,])
patchsize = numpy.array([64,])
ancestor_shift = numpy.array([8,])
# data_shift = numpy.array((4, 4))
max_level = 3
# learning_parameters
fit_args = {
'learning_rate': 1e-4,
'iteration': 30,
'normalize_dict': False,
'verbose': True,
}
# crate instance of generator of the extract operators
# generator = ExtractOperatorsGenerator(2, ancestor_size, patchsize, shift)
# generate the extract operators
downsampled_size = [numpy.array([64 // 2**i,]) for i in range(0, max_level)]
# downsampled_size = [numpy.array([ancestor_size[0]//(2**x), ancestor_size[1]//(2**x)]) for x in range(max_level)]
extract_operators = gen_extract_operators(ancestor_size, downsampled_size, patchsize, ancestor_shift)
ancestor_true = numpy.column_stack([mexh(numpy.linspace(-4, 4, 64)), mexh(numpy.linspace(-4, 4, 64), sigma=2.0), mexh(numpy.linspace(-4, 4, 64), sigma=0.2)])
# ancestor_true = numpy.sin(numpy.linspace(0, 2*numpy.pi, 64))
D_true = numpy.column_stack([numpy.dot(F, ancestor_true) for F in extract_operators])
# p = []
# for l in range(max_level):
# num = (ancestor_size[0]-downsampled_size[l][0]) // ancestor_shift[0] + 1
# p += [1/(max_level * num)] * num
l0norm = 5
data_num = 3000
C_true = numpy.zeros(shape=(D_true.shape[1], data_num))
for col in range(data_num):
rows = numpy.random.choice(C_true.shape[0], l0norm)
C_true[rows, col] = numpy.random.normal(size=l0norm)
y = numpy.dot(D_true, C_true)
y += numpy.random.normal(scale=0.01, size=y.shape)
# y = gen_patch_2d(image, patchsize, data_shift)
y_mean = numpy.mean(y, axis=0)
import argparse
import json
import os
import time
import glob
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from utils import image_utils
from srgraph import SRGraph
import metrics as es
from tqdm import tqdm
# arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', required=True, help='path of the config file (.json)')
parser.add_argument('--model_path', required=True, help='path of the model file (.pb)')
parser.add_argument('--data_path', required=True, help='ROOT FOLDER OF DATA (self)')
#parser.add_argument('--input_path', default='LR', help='folder path of the lower resolution (input) images')
#parser.add_argument('--output_path', default='output', help='folder path of the high resolution (output) images')
parser.add_argument('--scale', default=4, help='upscaling factor')
parser.add_argument('--self_ensemble', action='store_true', help='employ self ensemble')
parser.add_argument('--cuda_device', default='-1', help='CUDA device index to be used (will be set to the environment variable \'CUDA_VISIBLE_DEVICES\')')
args = parser.parse_args()
# constants
IMAGE_EXTS = ['.png', '.jpg']
def main():
# initialize
tf.logging.set_verbosity(tf.logging.INFO)
# SR config
with open(args.config_path, 'r') as f:
sr_config = json.load(f)
# SR graph
sr_model = SRGraph()
sr_model.prepare(scale=args.scale, standalone=True, config=sr_config, model_path=args.model_path)
# image reader/writer
image_reader = image_utils.ImageReader()
image_writer = image_utils.ImageWriter()
# image path list
image_path_list = []
data_path = args.data_path
lr_path = os.path.join(data_path, f'LR/X{args.scale}')
gt_path = os.path.join(data_path, 'HR')
lr_pathdir = sorted(glob.glob(lr_path+'/*'))
gt_pathdir = sorted(glob.glob(gt_path+'/*'))
print(f'model_path: {args.model_path}, scale: {args.scale}')
# run a dummy image to initialize internal graph
input_image = np.zeros([32, 32, 3], dtype=np.uint8)
sr_model.get_output([input_image])
# iterate
mlog = {'psnr':[], 'ssim':[], 'mse':[]} # (self)
running_time_list = []
for input_path, gt_path in tqdm(zip(lr_pathdir, gt_pathdir), total=len(lr_pathdir)):
input_image = image_reader.read(input_path)
gt_image = image_reader.read(gt_path)
running_time = 0.0
if (args.self_ensemble):
output_images = []
ensemble_running_time_list = []
for flip_index in range(2): # for flipping
input_image = np.transpose(input_image, axes=(1, 0, 2))
for rotate_index in range(4): # for rotating
input_image = np.rot90(input_image, k=1, axes=(0, 1))
t1 = time.perf_counter()
output_image = sr_model.get_output([input_image])[0]
t2 = time.perf_counter()
ensemble_running_time_list.append(t2 - t1)
output_image = np.clip(output_image, 0, 255)
output_image = np.rot90(output_image, k=(3-rotate_index), axes=(0, 1))
if (flip_index == 0):
output_image = | np.transpose(output_image, axes=(1, 0, 2)) | numpy.transpose |
from bolsonaro.models.omp_forest import OmpForest, SingleOmpForest
from bolsonaro.utils import binarize_class_data, omp_premature_warning
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import OrthogonalMatchingPursuit
import warnings
class OmpForestBinaryClassifier(SingleOmpForest):
DEFAULT_SCORE_METRIC = 'indicator'
def __init__(self, models_parameters):
estimator = RandomForestClassifier(**models_parameters.hyperparameters,
random_state=models_parameters.seed, n_jobs=-1)
super().__init__(models_parameters, estimator)
def _check_classes(self, y):
assert len(set(y).difference({-1, 1})) == 0, "Classes for binary classifier must be {-1, +1}"
def fit(self, X_forest, y_forest, X_omp, y_omp, use_distillation=False):
self._check_classes(y_forest)
self._check_classes(y_omp)
return super().fit(X_forest, y_forest, X_omp, y_omp, use_distillation=use_distillation)
def _base_estimator_predictions(self, X):
predictions_0_1 = super()._base_estimator_predictions(X)
predictions = (predictions_0_1 - 0.5) * 2
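        # e.g. a class-1 probability of 0.9 maps to +0.8, and 0.1 maps to -0.8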
return predictions
def score_base_estimator(self, X, y):
predictions = self._base_estimator_predictions(X)
evaluation = np.sum(np.sign(np.mean(predictions, axis=1)) == y) / len(y)
return evaluation
def predict_no_weights(self, X):
"""
Apply the SingleOmpForest to X without using the weights.
Make all the base tree predictions
:param X: a Forest
:return: a np.array of the predictions of the entire forest
"""
forest_predictions = self._base_estimator_predictions(X)
weights = self._omp.coef_
omp_trees_predictions = forest_predictions[:, weights != 0]
        # Here each column holds a tree's class-1 probability, rescaled to [-1, 1].
result_omp = np.mean(omp_trees_predictions, axis=1)
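        # Taking np.sign(result_omp) would give the hard {-1, +1} class label.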
return result_omp
def score(self, X, y, metric=DEFAULT_SCORE_METRIC):
"""
Evaluate OMPForestClassifer on (`X`, `y`) using `metric`
:param X:
:param y:
:param metric: might be "indicator"
:return:
"""
predictions = self.predict(X)
if metric == 'indicator':
evaluation = np.abs(np.mean(np.abs(np.sign(predictions) - y) - 1))
else:
raise ValueError("Unsupported metric '{}'.".format(metric))
return evaluation
class OmpForestMulticlassClassifier(OmpForest):
DEFAULT_SCORE_METRIC = 'indicator'
def __init__(self, models_parameters):
estimator = RandomForestClassifier(**models_parameters.hyperparameters,
random_state=models_parameters.seed, n_jobs=-1)
super().__init__(models_parameters, estimator)
        # question: maybe initialize the OMPs in __init__, as is done for SingleOmpForest?
self._dct_class_omp = {}
def fit_omp(self, atoms, objective):
assert len(self._dct_class_omp) == 0, "fit_omp can be called only once on {}".format(self.__class__.__name__)
possible_classes = sorted(set(objective))
for class_label in possible_classes:
atoms_binary = binarize_class_data(atoms, class_label, inplace=False)
objective_binary = binarize_class_data(objective, class_label, inplace=False)
            # TODO: maybe treat the forest size as global, so that only a fraction of it is available to each per-class OMP...
omp_class = OrthogonalMatchingPursuit(
n_nonzero_coefs=self.models_parameters.extracted_forest_size,
fit_intercept=True, normalize=False)
with warnings.catch_warnings(record=True) as caught_warnings:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
omp_class.fit(atoms_binary, objective_binary)
# ignore any non-custom warnings that may be in the list
caught_warnings = list(filter(lambda i: i.message != RuntimeWarning(omp_premature_warning), caught_warnings))
if len(caught_warnings) > 0:
self._logger.error(f'number of linear dependences in the dictionary: {len(caught_warnings)}. model parameters: {str(self._models_parameters.__dict__)}')
self._dct_class_omp[class_label] = omp_class
return self._dct_class_omp
def predict(self, X):
'''forest_predictions = self._base_estimator_predictions(X)
print(forest_predictions.shape)
if self._models_parameters.normalize_D:
forest_predictions /= self._forest_norms
label_names = []
preds = []
for class_label, omp_class in self._dct_class_omp.items():
label_names.append(class_label)
atoms_binary = binarize_class_data(forest_predictions, class_label, inplace=False)
print(atoms_binary.shape)
preds.append(self._make_omp_weighted_prediction(atoms_binary, omp_class, self._models_parameters.normalize_weights))
        # TODO: check that this is not buggy here
preds = np.array(preds).T'''
forest_predictions = np.array([tree.predict_proba(X) for tree in self._base_forest_estimator.estimators_]).T
if self._models_parameters.normalize_D:
forest_predictions /= self._forest_norms
label_names = []
preds = []
num_class = 0
for class_label, omp_class in self._dct_class_omp.items():
label_names.append(class_label)
            atoms_binary = (forest_predictions[num_class] - 0.5) * 2  # rescaled from 0/1 to -1/+1
preds.append(self._make_omp_weighted_prediction(atoms_binary, omp_class, self._models_parameters.normalize_weights))
num_class += 1
preds = np.array(preds).T
max_preds = np.argmax(preds, axis=1)
return np.array(label_names)[max_preds]
def predict_no_weights(self, X):
"""
Apply the SingleOmpForest to X without using the weights.
Make all the base tree predictions
:param X: a Forest
:return: a np.array of the predictions of the entire forest
"""
forest_predictions = np.array([tree.predict_proba(X) for tree in self._base_forest_estimator.estimators_]).T
if self._models_parameters.normalize_D:
forest_predictions = forest_predictions.T
forest_predictions /= self._forest_norms
forest_predictions = forest_predictions.T
label_names = []
preds = []
num_class = 0
for class_label, omp_class in self._dct_class_omp.items():
weights = omp_class.coef_
            omp_trees_indices = np.nonzero(weights)[0]  # take the index array so len() counts the selected trees
label_names.append(class_label)
            atoms_binary = (forest_predictions[num_class].T - 0.5) * 2  # rescaled from 0/1 to -1/+1
preds.append(np.sum(atoms_binary[omp_trees_indices], axis=0)/len(omp_trees_indices))
num_class += 1
preds = np.array(preds).T
max_preds = np.argmax(preds, axis=1)
return np.array(label_names)[max_preds]
def score(self, X, y, metric=DEFAULT_SCORE_METRIC):
predictions = self.predict(X)
if metric == 'indicator':
            evaluation = np.sum(predictions == y) / X.shape[0]
else:
raise ValueError("Unsupported metric '{}'.".format(metric))
return evaluation
@staticmethod
def _make_omp_weighted_prediction(base_predictions, omp_obj, normalize_weights=False):
if normalize_weights:
# we can normalize weights (by their sum) so that they sum to 1
# and they can be interpreted as impact percentages for interpretability.
            # this requires removing the (-) sign from the weights, e.g. moving it to the predictions (use unsigned_coef) --> I don't see why
            # question: I don't understand the nonzero trick below?
# predictions = self._omp.predict(forest_predictions) * (1 / (np.sum(self._omp.coef_) / len(np.nonzero(self._omp.coef_))))
coef_signs = np.sign(omp_obj.coef_)[np.newaxis, :] # add axis to make sure it will be broadcasted line-wise (there might be a confusion when forest_prediction is square)
unsigned_coef = (coef_signs * omp_obj.coef_).squeeze()
intercept = omp_obj.intercept_
adjusted_forest_predictions = base_predictions * coef_signs
predictions = adjusted_forest_predictions.dot(unsigned_coef) + intercept
else:
predictions = omp_obj.predict(base_predictions)
return predictions
if __name__ == "__main__":
forest = RandomForestClassifier(n_estimators=10)
X = np.random.rand(10, 5)
y = | np.random.choice([-1, +1], 10) | numpy.random.choice |
#Standard python libraries
import os
import warnings
import copy
import time
import itertools
import functools
#Dependencies - numpy, scipy, matplotlib, pyfftw
import numpy as np
import matplotlib.pyplot as plt
import pyfftw
from pyfftw.interfaces.numpy_fft import fft, fftshift, ifft, ifftshift, fftfreq
from scipy.interpolate import interp1d as sinterp1d
import scipy
from scipy.sparse import save_npz, load_npz, eye, csr_matrix
from scipy.sparse.linalg import eigs
from ufss import DiagramGenerator
from scipy.integrate import RK45
class RK_rho_container:
def __init__(self,t,rho,pulse_number,manifold_key,*,interp_kind='linear',
optical_gap = 0):
self.pulse_number = pulse_number
self.n, self.M = rho.shape
self.manifold_key = manifold_key
self.optical_gap = optical_gap
if t.size == 1:
self.M = self.M+2
self.t = np.array([-1,0,1],dtype='float') * np.spacing(t[0]) + t[0]
rho_new = np.zeros((self.n,3),dtype='complex')
rho_new[:,0] = 0
rho_new[:,1] = 0.5 * rho[:,0]
rho_new[:,2] = rho[:,0]
self.rho = rho_new
self.interp = self.make_interpolant(kind='zero')
else:
self.t = t
self.rho = rho
self.interp = self.make_interpolant(kind=interp_kind)
self.t_checkpoint = t
self.rho_checkpoint = rho
def make_interpolant(self,*, kind='cubic'):
"""Interpolates density matrix
"""
return sinterp1d(self.t,self.rho,fill_value = (0,np.nan),bounds_error = False,
assume_sorted=True,kind=kind)
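    # fill_value=(0, nan) above: the density matrix is zero before the stored
    # time window, while times beyond it yield NaN and are handled by extend().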
def one_time_step(self,rho0,t0,tf,*,find_best_starting_time = True):
if find_best_starting_time and tf < self.t_checkpoint[-1]:
diff1 = tf - t0
diff2 = tf - self.t[-1]
closest_t_checkpoint_ind = np.argmin(np.abs(self.t_checkpoint - tf))
closest_t_checkpoint = self.t_checkpoint[closest_t_checkpoint_ind]
diff3 = tf - closest_t_checkpoint
rho0s = [rho0,self.rho[:,-1],self.rho_checkpoint[:,closest_t_checkpoint_ind]]
neighbor_ind = closest_t_checkpoint_ind - 1
if neighbor_ind >= 0:
neighbor = self.t_checkpoint[closest_t_checkpoint_ind-1]
diff4 = tf - neighbor
rho0s.append(self.rho_checkpoint[:,neighbor_ind])
else:
neighbor = np.nan
diff4 = np.inf
t0s = np.array([t0,self.t[-1],closest_t_checkpoint,neighbor])
diffs = np.array([diff1,diff2,diff3,diff4])
for i in range(diffs.size):
if diffs[i] < 0:
diffs[i] = np.inf
if np.allclose(diffs,np.inf):
raise ValueError('Method extend is only valid for times after the pulse has ended')
t0 = t0s[np.argmin(diffs)]
rho0 = rho0s[np.argmin(diffs)]
elif find_best_starting_time and tf > self.t_checkpoint[-1]:
if self.t_checkpoint[-1] > t0:
t0 = self.t_checkpoint[-1]
rho0 = self.rho_checkpoint[:,-1]
else:
pass
else:
pass
# RWA_gap = self.manifold.dot(np.array([1,-1])) * self.optical_gap
return self.one_time_step_function(rho0,t0,tf,manifold_key=self.manifold_key)#,RWA_gap = RWA_gap)
def extend(self,t):
ans = np.zeros((self.n,t.size),dtype='complex')
if t[0] >= self.t_checkpoint[0]:
t_intersect, t_inds, t_checkpoint_inds = np.intersect1d(t,self.t_checkpoint,return_indices=True)
ans[:,t_inds] = self.rho_checkpoint[:,t_checkpoint_inds]
if t_inds.size == t.size:
return ans
else:
all_t_inds = np.arange(t.size)
other_t_inds = np.setdiff1d(all_t_inds,t_inds)
t0 = self.t_checkpoint[-1]
rho0 = self.rho_checkpoint[:,-1]
if t[other_t_inds[0]] >= t0:
find_best_starting_time = False
else:
find_best_starting_time = True
for t_ind in other_t_inds:
tf = t[t_ind]
ans[:,t_ind] = self.one_time_step(rho0,t0,tf,find_best_starting_time = find_best_starting_time)
t0 = tf
rho0 = ans[:,t_ind]
elif t[0] >= self.t[-1]:
t0 = self.t[-1]
rho0 = self.rho[:,-1]
for i in range(len(t)):
ans[:,i] = self.one_time_step(rho0,t0,t[i],find_best_starting_time = True)
t0 = t[i]
rho0 = ans[:,i]
else:
raise ValueError('Method extend is only valid for times after the pulse has ended')
self.rho_checkpoint = ans
self.t_checkpoint = t
return ans
def __call__(self,t):
"""Assumes t is sorted """
if type(t) is np.ndarray:
pass
elif type(t) is list:
t = np.array(t)
else:
t = np.array([t])
extend_inds = np.where(t>self.t[-1])
interp_inds = np.where(t<=self.t[-1])
ta = t[interp_inds]
tb = t[extend_inds]
if ta.size > 0:
ans_a_flag = True
if ta.size == self.M and np.allclose(ta,self.t):
ans_a = self.rho
else:
ans_a = self.interp(ta)
else:
ans_a_flag = False
if tb.size > 0:
ans_b = self.extend(tb)
ans_b_flag = True
else:
ans_b_flag = False
if ans_a_flag and ans_b_flag:
ans = np.hstack((ans_a,ans_b))
elif ans_a_flag:
ans = ans_a
elif ans_b_flag:
ans = ans_b
else:
ans = None
return ans
def __getitem__(self,inds):
return self.rho[:,inds]
class RKE_DensityMatrices(DiagramGenerator):
"""This class is designed to calculate perturbative wavepackets in the
light-matter interaction given the eigenvalues of the unperturbed
hamiltonian and the material dipole operator evaluated in the
eigenbasis of the unperturbed hamiltonian.
Args:
file_path (string): path to folder containing eigenvalues and the
dipole operator for the system Hamiltonian
detection_type (string): options are 'polarization' (default) or 'fluorescence'
"""
def __init__(self,file_path,*,detection_type = 'polarization',
conserve_memory=False):
self.slicing_time = 0
self.interpolation_time = 0
self.expectation_time = 0
self.RK45_step_time = 0
self.dipole_dot_rho_time = 0
self.dipole_time = 0
self.automation_time = 0
self.diagram_to_signal_time = 0
self.base_path = file_path
self.undersample_factor = 1
self.gamma_res = 6.91
self.sparsity_threshold = 0.1
self.conserve_memory = conserve_memory
self.load_L()
self.set_rho_shapes()
if not self.conserve_memory:
self.load_mu()
try:
self.load_H_mu()
# more efficient if H_mu is available
self.dipole_down = self.dipole_down_H_mu
except:
# generally less efficient - mostly here for backwards compatibility
self.dipole_down = self.dipole_down_L_mu
self.optical_gap = 0
self.atol = 1E-6
self.rtol = 1E-5
if detection_type == 'polarization':
self.rho_to_signal = self.polarization_detection_rho_to_signal
self.return_complex_signal = False
elif detection_type == 'complex_polarization':
self.rho_to_signal = self.polarization_detection_rho_to_signal
self.return_complex_signal = True
detection_type = 'polarization'
elif detection_type == 'integrated_polarization':
raise Exception('detection_type: Integrated polarization is not implemented for Open RKE')
self.rho_to_signal = self.integrated_polarization_detection_rho_to_signal
elif detection_type == 'fluorescence':
self.rho_to_signal = self.fluorescence_detection_rho_to_signal
DiagramGenerator.__init__(self,detection_type=detection_type)
self.KB_dict = {'Bu':self.bra_up,'Ku':self.ket_up,'Kd':self.ket_down,'Bd':self.bra_down}
        # Code will not actually function until the following empty lists are set by the user
self.efields = [] #initialize empty list of electric field shapes
        self.efield_times = [] #initialize empty list of times associated with each electric field shape
self.dts = [] #initialize empty list of time spacings associated with each electric field shape
self.polarization_sequence = [] #initialize empty polarization sequence
self.pulse_times = [] #initialize empty list of pulse arrival times
self.centers = [] #initialize empty list of pulse center frequencies
self.efield_wavevectors = []
self.rhos = dict()
# Initialize unperturbed wavefunction
self.set_rho0_auto()
def set_pulse_delays(self,all_delays):
"""Must be a list of numpy arrays, where each array is a
list of delay times between pulses
"""
self.all_pulse_delays = all_delays
num_delays = len(self.all_pulse_delays)
num_pulses = len(self.efields)
if num_delays == num_pulses - 1:
pass
elif num_delays == num_pulses - 2 and self.detection_type == 'polarization':
# If there is a local oscillator, it arrives simultaneously with the last pulse
self.all_pulse_delays.append(np.array([0]))
elif num_delays <= num_pulses -2:
raise Exception('There are not enough delay times')
elif num_delays >= num_pulses:
raise Exception('There are too many delay times')
def calculate_diagrams_all_delays(self,diagrams):
t0 = time.time()
num_delays = len(self.all_pulse_delays)
num_pulses = len(self.efields)
all_delay_combinations = list(itertools.product(*self.all_pulse_delays))
signal_shape = [delays.size for delays in self.all_pulse_delays]
if self.detection_type == 'polarization':
signal = np.zeros((len(all_delay_combinations),self.w.size),dtype='complex')
if len(signal_shape) == self.pdc.shape[0]:
# get rid of the "delay" between the last pulse and the local oscillator
signal_shape[-1] = self.w.size
elif len(signal_shape) == self.pdc.shape[0] - 1:
                # append the shape of the polarization-detection axis
signal_shape.append(self.w.size)
else:
raise Exception('Cannot automatically determine final signal shape')
else:
signal = np.zeros((len(all_delay_combinations)),dtype='complex')
counter = 0
for delays in all_delay_combinations:
arrival_times = [0]
for delay in delays:
arrival_times.append(arrival_times[-1]+delay)
if self.detection_type == 'polarization':
signal[counter,:] = self.calculate_diagrams(diagrams,arrival_times)
else:
signal[counter] = self.calculate_diagrams(diagrams,arrival_times)
counter += 1
self.signal = signal.reshape(signal_shape)
self.calculation_time = time.time() - t0
return self.signal
def save_timing(self):
save_dict = {'RKE_calculation_time':self.calculation_time}
np.savez(os.path.join(self.base_path,'RKE_calculation_time.npz'),**save_dict)
def calculate_signal_all_delays(self):
t0 = time.time()
num_delays = len(self.all_pulse_delays)
num_pulses = len(self.efields)
all_delay_combinations = list(itertools.product(*self.all_pulse_delays))
signal_shape = [delays.size for delays in self.all_pulse_delays]
if self.detection_type == 'polarization':
signal = np.zeros((len(all_delay_combinations),self.w.size),dtype='complex')
if len(signal_shape) == self.pdc.shape[0]:
# get rid of the "delay" between the last pulse and the local oscillator
signal_shape[-1] = self.w.size
elif len(signal_shape) == self.pdc.shape[0] - 1:
                # append the shape of the polarization-detection axis
signal_shape.append(self.w.size)
else:
raise Exception('Cannot automatically determine final signal shape')
else:
signal = np.zeros((len(all_delay_combinations)),dtype='complex')
counter = 0
for delays in all_delay_combinations:
arrival_times = [0]
for delay in delays:
arrival_times.append(arrival_times[-1]+delay)
if self.detection_type == 'polarization':
signal[counter,:] = self.calculate_signal(arrival_times)
else:
signal[counter] = self.calculate_signal(arrival_times)
counter += 1
self.signal = signal.reshape(signal_shape)
self.calculation_time = time.time() - t0
return self.signal
def set_t(self,optical_dephasing_rate,*,dt='auto'):
"""Sets the time grid upon which all frequency-detected signals will
be calculated on
"""
max_pos_t = int(self.gamma_res/optical_dephasing_rate)
if dt == 'auto':
dt = self.dts[-1] # signal detection bandwidth determined by local oscillator
self.t = np.arange(-max_pos_t,max_pos_t+dt/2,dt)
# if self.t.size % 2:
# self.t = self.t[:-1]
self.w = fftshift(fftfreq(self.t.size,d=dt)*2*np.pi)
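        # gamma_res = 6.91 ~ ln(1000), so the grid presumably extends until the
        # optical coherence has decayed to ~0.1% of its initial amplitude.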
def execute_diagram(self,instructions):
r = self.rho0
name = ''
for i in range(len(instructions)):
key, num = instructions[i]
name += key+str(num)
# Try to re-use previous calculations, if they exist
try:
new_r = self.rhos[name]
except KeyError:
new_r = self.KB_dict[key](r,pulse_number=num)
self.rhos[name] = new_r
r = new_r
sig = self.rho_to_signal(r)
return sig
def remove_rhos_by_pulse_number(self,pulse_number):
num = str(pulse_number)
keys = self.rhos.keys()
keys_to_remove = []
for key in keys:
flag = key.find(num)
if flag >= 0:
keys_to_remove.append(key)
for key in keys_to_remove:
self.rhos.pop(key)
def calculate_signal(self,arrival_times):
t0 = time.time()
try:
old_pulse_times = self.pulse_times
for i in range(len(old_pulse_times)):
if old_pulse_times[i] != arrival_times[i]:
self.remove_rhos_by_pulse_number(i)
except AttributeError:
pass
self.pulse_times = arrival_times
if self.detection_type == 'polarization':
times = [self.efield_times[i] + arrival_times[i] for i in range(len(arrival_times)-1)]
elif self.detection_type == 'integrated_polarization':
times = [self.efield_times[i] + arrival_times[i] for i in range(len(arrival_times)-1)]
elif self.detection_type == 'fluorescence':
times = [self.efield_times[i] + arrival_times[i] for i in range(len(arrival_times))]
efield_permutations = self.relevant_permutations(times)
diagram_instructions = []
for perm in efield_permutations:
diagram_instructions += self.instructions_from_permutation(perm)
self.current_instructions = diagram_instructions
t1 = time.time()
try:
instructions = diagram_instructions[0]
signal = self.execute_diagram(instructions)
for instructions in diagram_instructions[1:]:
signal += self.execute_diagram(instructions)
except IndexError:
signal = 0
t2 = time.time()
self.automation_time += t1-t0
self.diagram_to_signal_time += t2-t1
return signal
def calculate_diagrams(self,diagram_instructions,arrival_times):
try:
old_pulse_times = self.pulse_times
for i in range(len(old_pulse_times)):
if old_pulse_times[i] != arrival_times[i]:
self.remove_rhos_by_pulse_number(i)
except AttributeError:
pass
self.pulse_times = arrival_times
self.current_instructions = diagram_instructions
instructions = diagram_instructions[0]
signal = self.execute_diagram(instructions)
for instructions in diagram_instructions[1:]:
signal += self.execute_diagram(instructions)
return signal
def polarization_detection_rho_to_signal(self,rho):
p_of_t = self.dipole_expectation(rho,pulse_number=-1,ket_flag=True)
return self.polarization_to_signal(p_of_t,local_oscillator_number=-1)
def integrated_polarization_detection_rho_to_signal(self,rho):
p = self.integrated_dipole_expectation(rho,ket_flag=True)
return self.integrated_polarization_to_signal(p,local_oscillator_number=-1)
# def fluorescence_detection_rho_to_signal(self,rho):
# L_size = self.eigenvalues[0].size
# H_size = int(np.sqrt(L_size))
# # reshape rho into a normal density matrix representation
# rho = rho.reshape((H_size,H_size))
# fluorescence_yield = np.array([0,1,1,self.f_yield])
# signal = np.dot(np.diagonal(rho),fluorescence_yield)
# return signal
def set_efields(self,times_list,efields_list,centers_list,phase_discrimination,*,reset_rhos = True,
plot_fields = False):
self.efield_times = times_list
self.efields = efields_list
self.centers = centers_list
self.set_phase_discrimination(phase_discrimination)
self.dts = []
self.efield_frequencies = []
if reset_rhos:
self.rhos = dict()
for t in times_list:
if t.size == 1:
dt = 1
w = np.array([0])
else:
dt = t[1] - t[0]
w = fftshift(fftfreq(t.size,d=dt))*2*np.pi
self.dts.append(dt)
self.efield_frequencies.append(w)
self.dt = self.dts[0]
if self.detection_type == 'polarization':
try:
self.local_oscillator = self.efields[-1].copy()
except:
self.local_oscillator = copy.deepcopy(self.efields[-1])
for field in self.efields:
if len(field) == 1:
# M = 1 is the impulsive limit
pass
else:
self.check_efield_resolution(field,plot_fields = plot_fields)
def check_efield_resolution(self,efield,*,plot_fields = False):
efield_tail = np.max(np.abs([efield[0],efield[-1]]))
if efield_tail > np.max(np.abs(efield))/100:
warnings.warn('Consider using larger num_conv_points, pump does not decay to less than 1% of maximum value in time domain')
efield_fft = fftshift(fft(ifftshift(efield)))*self.dt
efield_fft_tail = np.max(np.abs([efield_fft[0],efield_fft[-1]]))
if efield_fft_tail > np.max(np.abs(efield_fft))/100:
warnings.warn('''Consider using smaller value of dt, pump does not decay to less than 1% of maximum value in frequency domain''')
if plot_fields:
fig, axes = plt.subplots(1,2)
l1,l2, = axes[0].plot(self.efield_t,np.real(efield),self.efield_t,np.imag(efield))
plt.legend([l1,l2],['Real','Imag'])
axes[1].plot(self.efield_w,np.real(efield_fft),self.efield_w,np.imag(efield_fft))
axes[0].set_ylabel('Electric field Amp')
            axes[0].set_xlabel('Time ($\omega_0^{-1}$)')
axes[1].set_xlabel('Frequency ($\omega_0$)')
fig.suptitle('Check that efield is well-resolved in time and frequency')
plt.show()
def set_local_oscillator_phase(self,phase):
self.efields[-1] = np.exp(1j*phase) * self.local_oscillator
def get_closest_index_and_value(self,value,array):
"""Given an array and a desired value, finds the closest actual value
stored in that array, and returns that value, along with its corresponding
array index
"""
index = np.argmin(np.abs(array - value))
value = array[index]
return index, value
def load_L(self):
"""Load in known eigenvalues. Must be stored as a numpy archive file,
with keys: GSM, SEM, and optionally DEM. The eigenvalues for each manifold
must be 1d arrays, and are assumed to be ordered by increasing energy. The
energy difference between the lowest energy ground state and the lowest
energy singly-excited state should be set to 0
"""
L_save_name = os.path.join(self.base_path,'L.npz')
try:
with np.load(L_save_name,allow_pickle=True) as L_archive:
self.L = dict()
for key in L_archive.keys():
L = L_archive[key]
if L.dtype == np.dtype('O'):
self.L[key] = L[()]
else:
if self.check_sparsity(L):
self.L[key] = csr_matrix(L)
else:
self.L[key] = L
except:
self.L = {'all_manifolds':load_npz(L_save_name)}
self.manifolds = list(self.L.keys())
def check_sparsity(self,mat):
csr_mat = csr_matrix(mat)
sparsity = csr_mat.nnz / (csr_mat.shape[0]*csr_mat.shape[1])
if sparsity < self.sparsity_threshold:
return True
else:
return False
def dL(self,t,rho):
try:
L = self.L['all_manifolds']
except KeyError:
L = self.L[rho.manifold_key]
return L.dot(rho)
def get_dL_manual(self,manifold_key):
try:
L = self.L['all_manifolds']
except KeyError:
L = self.L[manifold_key]
def L_fun(t,rho):
return L.dot(rho)
return L_fun
def one_time_step_function(self,rho0,t0,tf,*,manifold_key = None):
num_steps = 0
if manifold_key == None:
rk45 = RK45(self.dL,t0,rho0,tf,atol=self.atol,rtol=self.rtol)
else:
dL = self.get_dL_manual(manifold_key)
rk45 = RK45(dL,t0,rho0,tf,atol=self.atol,rtol=self.rtol)
while rk45.t < tf:
rk45.step()
num_steps += 1
rho_final = rk45.y
return rho_final
def get_bottom_eigenvector(self):
try:
L = self.L['all_manifolds']
except KeyError:
L = self.L['00']
if L.shape == (1,1):
e = L[0,0]
ev = np.array([[1]])
else:
e, ev = eigs(L,k=1,which='SM',maxiter=10000)
if e.size == 1 and np.allclose(e,0):
pass
else:
raise Exception('Smallest magnitude eigenvalue of L is {}. L must have a single stationary state for this code to work'.format(e))
v = ev[:,0]
H_size = int(np.sqrt(v.size))
rho = v.reshape((H_size,H_size))
trace = rho.trace()
v = v/trace # Need to start with a trace 1 object
return v
def set_rho0_auto(self):
try:
rho0 = np.load(os.path.join(self.base_path,'rho0.npy'))
except FileNotFoundError:
rho0 = self.get_bottom_eigenvector()
t = np.array([-np.inf,0,np.inf])
rho0 = rho0[:,np.newaxis] * np.ones((rho0.size,t.size))
pulse_number = None
manifold_key = '00'
self.rho0 = RK_rho_container(t,rho0,pulse_number,manifold_key,
interp_kind = 'zero',optical_gap = self.optical_gap)
def set_rho_shapes(self):
self.rho_shapes = dict()
if 'all_manifolds' in self.manifolds:
L_size = self.L['all_manifolds'].size
H_size = int(np.sqrt(L_size))
self.rho_shapes['all_manifolds'] = (H_size,H_size)
else:
H_sizes = dict()
for key in self.manifolds:
ket_key, bra_key = key
if ket_key == bra_key:
L_size = self.L[key].shape[0]
H_size = int(np.sqrt(L_size))
H_sizes[ket_key] = H_size
for key in self.manifolds:
ket_key, bra_key = key
ket_size = H_sizes[ket_key]
bra_size = H_sizes[bra_key]
self.rho_shapes[key] = (ket_size,bra_size)
def load_mu(self):
"""Load the precalculated dipole overlaps. The dipole operator must
        be stored as a .npz file, and must contain one or more arrays, each with three
indices: (new manifold index, old manifold eigenfunction,
cartesian coordinate)."""
try:
file_name = os.path.join(self.base_path,'mu_site_basis.npz')
with np.load(file_name) as mu_archive:
self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
except FileNotFoundError:
try:
file_name = os.path.join(self.base_path,'mu_original_L_basis.npz')
with np.load(file_name) as mu_archive:
self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
except FileNotFoundError:
file_name = os.path.join(self.base_path,'mu.npz')
with np.load(file_name) as mu_archive:
self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
sparse_flags = []
for key in self.mu.keys():
mu_2D = np.sum(np.abs(self.mu[key])**2,axis=-1)
sparse_flags.append(self.check_sparsity(mu_2D))
sparse_flags = np.array(sparse_flags)
if np.allclose(sparse_flags,True):
self.sparse_mu_flag = True
else:
self.sparse_mu_flag = False
for key in self.mu.keys():
mu_x = self.mu[key][...,0]
mu_y = self.mu[key][...,1]
mu_z = self.mu[key][...,2]
if self.sparse_mu_flag:
self.mu[key] = [csr_matrix(mu_x),csr_matrix(mu_y),csr_matrix(mu_z)]
else:
self.mu[key] = [mu_x,mu_y,mu_z]
print('RKE_sparse_mu_flag',self.sparse_mu_flag)
### Setting the electric field to be used
def set_polarization_sequence(self,polarization_list,*,reset_rhos=True):
"""Sets the sequences used for either parallel or crossed pump and probe
Args:
polarization_list (list): list of four strings, can be 'x','y' or 'z'
Returns:
None: sets the attribute polarization sequence
"""
x = np.array([1,0,0])
y = np.array([0,1,0])
z = np.array([0,0,1])
pol_options = {'x':x,'y':y,'z':z}
self.polarization_sequence = [pol_options[pol] for pol in polarization_list]
if reset_rhos:
self.rhos = dict()
    ### Tools for recursively calculating perturbed density matrices using TDPT
def dipole_matrix(self,pulse_number,key,ket_flag=True,up_flag=True):
"""Calculates the dipole matrix given the electric field polarization vector,
if ket_flag = False then uses the bra-interaction"""
t0 = time.time()
pol = self.polarization_sequence[pulse_number]
x = np.array([1,0,0])
y = np.array([0,1,0])
z = np.array([0,0,1])
try:
mu = self.mu[key]
except KeyError:
if ket_flag:
key = 'ket'
else:
key = 'bra'
if up_flag:
key += '_up'
else:
key += '_down'
mu = self.mu[key]
if np.all(pol == x):
overlap_matrix = mu[0]#.copy()
elif np.all(pol == y):
overlap_matrix = mu[1]#.copy()
elif np.all(pol == z):
overlap_matrix = mu[2]#.copy()
else:
overlap_matrix = mu[0]*pol[0] + mu[1]*pol[1] + mu[2]*pol[2]
# if self.sparse_mu_flag:
# to_return = csr_matrix(overlap_matrix)
# else:
# to_return = overlap_matrix
t1 = time.time()
self.dipole_time += t1-t0
return overlap_matrix
def manifold_key_to_array(self,key):
"""Key must be a string of exactly 2 integers, the first describing
the ket manifold, the second the bra manifold. If the density
matrix is represented in the full space, rather than being divided
        into manifolds, the first integer represents the total number of
        excitations to the ket side, and the second represents
        the sum of all excitations to the bra side."""
        if len(key) != 2:
            raise Exception('manifold key must be a string of exactly two integers')
return np.array([int(char) for char in key],dtype=int)
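    # Example: manifold_key_to_array('01') -> array([0, 1]), and
    # manifold_array_to_key(np.array([0, 1])) -> '01'.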
def manifold_array_to_key(self,manifold):
"""Inverse of self.manifold_key_to_array"""
if manifold.size != 2 or manifold.dtype != int:
            raise Exception('manifold array must contain exactly 2 integers')
return str(manifold[0]) + str(manifold[1])
def next_order(self,rho_in,*,ket_flag=True,up_flag=True,pulse_number = 0):
"""This function connects psi_p to psi+pj^(*) using the Euler Method.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
Return:
rho_dict (rho_container): next-order density matrix
"""
pulse_time = self.pulse_times[pulse_number]
t = self.efield_times[pulse_number] + pulse_time
old_manifold_key = rho_in.manifold_key
if up_flag:
change = 1
else:
change = -1
if ket_flag:
manifold_change = np.array([change,0],dtype=int)
else:
manifold_change = np.array([0,change],dtype=int)
old_manifold = self.manifold_key_to_array(old_manifold_key)
new_manifold = old_manifold + manifold_change
new_manifold_key = self.manifold_array_to_key(new_manifold)
mu_key = old_manifold_key + '_to_' + new_manifold_key
if ket_flag == up_flag:
# Rotating term excites the ket and de-excites the bra
conjugate_flag = False
else:
# Counter-rotating term
conjugate_flag = True
if conjugate_flag:
center = -self.centers[pulse_number]
else:
center = self.centers[pulse_number]
M = t.size
old_rho = rho_in(t)
if self.conserve_memory:
# move back to the basis the Liouvillian was written in
if 'all_manifolds' in self.manifolds:
ket_size,bra_size = self.rho_shapes['all_manifolds']
else:
ket_size,bra_size = self.rho_shapes[old_manifold_key]
old_rho = old_rho.reshape(ket_size,bra_size,M)
if ket_flag:
old_ket_key = old_manifold_key[0]
new_ket_key = new_manifold_key[0]
if up_flag:
H_mu_key = old_ket_key + '_to_' + new_ket_key
else:
H_mu_key = new_ket_key + '_to_' + old_ket_key
mu_up_flag = up_flag
else:
old_bra_key = old_manifold_key[1]
new_bra_key = new_manifold_key[1]
if up_flag:
H_mu_key = old_bra_key + '_to_' + new_bra_key
else:
H_mu_key = new_bra_key + '_to_' + old_bra_key
mu_up_flag = not up_flag
overlap_matrix = self.get_H_mu(pulse_number,H_mu_key,up_flag=mu_up_flag)
ta = time.time()
if ket_flag:
mu_old_rho = np.einsum('ij,jkl',overlap_matrix,old_rho)
else:
mu_old_rho = np.einsum('ijl,jk',old_rho,overlap_matrix)
tb = time.time()
rho_vec_size = mu_old_rho.shape[0]*mu_old_rho.shape[1]
mu_old_rho = mu_old_rho.reshape(rho_vec_size,M)
else:
overlap_matrix = self.dipole_matrix(pulse_number,mu_key,ket_flag=ket_flag,up_flag=up_flag)
ta = time.time()
mu_old_rho = overlap_matrix.dot(old_rho)
tb = time.time()
self.dipole_dot_rho_time += tb - ta
next_rho = np.zeros(mu_old_rho.shape,dtype='complex')
if M == 1:
next_rho[:,0] = self.efields[pulse_number] * mu_old_rho
else:
if conjugate_flag:
efield = self.efields[pulse_number]*np.exp(-1j*center*t)
else:
efield = np.conjugate(self.efields[pulse_number])*np.exp(-1j*center*t)
############
# This 1j vs -1j needs to be derived!!! #####
############
if ket_flag:
efield = 1j * efield
else:
efield = -1j * efield
###########
###########
dt = self.dts[pulse_number]
next_rho[:,0] = efield[0] * mu_old_rho[:,0] * dt
for i in range(1,t.size):
rho0 = next_rho[:,i-1]
t0 = t[i-1]
ta = time.time()
next_rho[:,i] = self.one_time_step_function(rho0,t0,t[i],manifold_key=new_manifold_key)
tb = time.time()
self.RK45_step_time += tb - ta
next_rho[:,i] += efield[i] * mu_old_rho[:,i] * dt
# # i/hbar Straight from perturbation theory
# if ket_flag:
# rho *= 1j
# else:
# rho *= -1j
rho_out = RK_rho_container(t,next_rho,pulse_number,new_manifold_key,
optical_gap = self.optical_gap)
rho_out.one_time_step_function = self.one_time_step_function
return rho_out
def ket_up(self,rho_in,*,pulse_number = 0):
"""This method connects psi_p to psi_pj where the next order psi
is one manifold above the current manifold.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
Returns:
(rho_container): output from method next_order
"""
return self.next_order(rho_in,ket_flag=True,up_flag=True,
pulse_number = pulse_number)
def ket_down(self,rho_in,*,pulse_number = 0):
"""This method connects psi_p to psi_pj where the next order psi
is one manifold above the current manifold.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
Returns:
(rho_container): output from method next_order
"""
return self.next_order(rho_in,ket_flag=True,up_flag=False,
pulse_number = pulse_number)
def bra_up(self,rho_in,*,pulse_number = 0):
"""This method connects psi_p to psi_pj where the next order psi
is one manifold above the current manifold.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
Returns:
(rho_container): output from method next_order
"""
return self.next_order(rho_in,ket_flag=False,up_flag=True,
pulse_number = pulse_number)
def bra_down(self,rho_in,*,pulse_number = 0):
"""This method connects psi_p to psi_pj where the next order psi
is one manifold above the current manifold.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
Returns:
(rho_container): output from method next_order
"""
return self.next_order(rho_in,ket_flag=False,up_flag=False,
pulse_number = pulse_number)
### Tools for taking the expectation value of the dipole operator with perturbed density matrices
def dipole_down_H_mu(self,rho,manifold_key,*,new_manifold_mask = None,pulse_number = -1,
ket_flag=True):
"""This method is similar to the method down, but does not involve
the electric field shape or convolutions. It is the action of the
dipole operator on the ket-side without TDPT effects. It also includes
the dot product of the final electric field polarization vector."""
if not ket_flag:
raise Exception('Not implemented for bra-side')
old_manifold_key = manifold_key
old_ket_key = old_manifold_key[0]
new_ket_key = str(int(old_ket_key)-1)
mu_key = new_ket_key + '_to_' + old_ket_key
if ket_flag:
center = - self.centers[pulse_number]
conjugate_flag = True
else:
center = self.centers[pulse_number]
conjugate_flag = False
t_size = rho.shape[-1]
if 'all_manifolds' in self.L.keys():
L_size = rho.size
H_size = int(np.sqrt(L_size))
rho = rho.reshape(H_size,H_size,t_size)
else:
ket_manifold_key = old_manifold_key[0] + old_manifold_key[0]
ket_L_manifold_size = self.L[ket_manifold_key].shape[0]
ket_H_size = int(np.sqrt(ket_L_manifold_size))
bra_manifold_key = old_manifold_key[1] + old_manifold_key[1]
bra_L_manifold_size = self.L[bra_manifold_key].shape[0]
bra_H_size = int(np.sqrt(bra_L_manifold_size))
rho = rho.reshape(ket_H_size,bra_H_size,t_size)
overlap_matrix = self.get_H_mu(pulse_number,mu_key,ket_flag=ket_flag,up_flag=False)
t0 = time.time()
polarization_field = np.einsum('ij,jik',overlap_matrix,rho)
t1 = time.time()
return polarization_field
def dipole_down_L_mu(self,rho,manifold_key,*,new_manifold_mask = None,
pulse_number = -1,ket_flag=True):
"""This method is similar to the method down, but does not involve
the electric field shape or convolutions. It is the action of the
dipole operator on the ket-side without TDPT effects. It also includes
the dot product of the final electric field polarization vector."""
old_manifold_key = manifold_key
change = -1
if ket_flag:
manifold_change = np.array([change,0])
else:
manifold_change = np.array([0,change])
old_manifold = self.manifold_key_to_array(old_manifold_key)
new_manifold = old_manifold + manifold_change
new_manifold_key = self.manifold_array_to_key(new_manifold)
mu_key = old_manifold_key + '_to_' + new_manifold_key
if ket_flag:
center = - self.centers[pulse_number]
conjugate_flag = True
else:
center = self.centers[pulse_number]
conjugate_flag = False
rho_in = rho
overlap_matrix = self.dipole_matrix(pulse_number,mu_key,ket_flag=True,up_flag=False)
t0 = time.time()
rho = overlap_matrix.dot(rho_in)
t1 = time.time()
L_size = rho.shape[0]
H_size = int(np.sqrt(L_size))
# reshape rho into a normal density matrix representation
rho = rho.reshape((H_size,H_size,rho.shape[-1]))
polarization_field = np.einsum('iij',rho)
return polarization_field
def load_H_mu(self):
parent_dir = os.path.split(self.base_path)[0]
file_name = os.path.join(parent_dir,'closed','mu.npz')
with np.load(file_name) as mu_archive:
self.H_mu = {key:mu_archive[key] for key in mu_archive.keys()}
def get_H_mu(self,pulse_number,key,ket_flag=True,up_flag=True):
"""Calculates the dipole matrix given the electric field polarization vector,
if ket_flag = False then uses the bra-interaction"""
t0 = time.time()
pol = self.polarization_sequence[pulse_number]
x = np.array([1,0,0])
y = np.array([0,1,0])
z = np.array([0,0,1])
try:
mu = self.H_mu[key]
except KeyError:
try:
key = 'up'
mu = self.H_mu[key]
except KeyError:
key = 'ket_up'
mu = self.H_mu[key]
if np.all(pol == x):
overlap_matrix = mu[:,:,0].copy()
elif np.all(pol == y):
overlap_matrix = mu[:,:,1].copy()
elif np.all(pol == z):
overlap_matrix = mu[:,:,2].copy()
else:
overlap_matrix = np.tensordot(mu,pol,axes=(-1,0))
if not up_flag:
overlap_matrix = overlap_matrix.T
t1 = time.time()
self.dipole_time += t1-t0
return overlap_matrix
def set_undersample_factor(self,frequency_resolution):
"""dt is set by the pulse. However, the system dynamics may not require such a
small dt. Therefore, this allows the user to set a requested frequency
resolution for any spectrally resolved signals."""
# f = pi/dt
dt = np.pi/frequency_resolution
u = int(np.floor(dt/self.dt))
self.undersample_factor = max(u,1)
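        # e.g. with self.dt = 0.05 and frequency_resolution = 1, dt = pi ~ 3.14,
        # giving an undersample factor of floor(3.14/0.05) = 62.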
def dipole_expectation(self,rho_in,*,pulse_number = -1,ket_flag=True):
"""Computes the expectation value of the dipole operator"""
t0 = time.time()
pulse_number = -1
pulse_time = self.pulse_times[pulse_number]
efield_t = self.efield_times[pulse_number] + pulse_time
if ket_flag:
center = - self.centers[pulse_number]
else:
center = self.centers[pulse_number]
# The signal is zero before the final pulse arrives, and persists
# until it decays. Therefore we avoid taking the sum at times
# where the signal is zero.
t = self.t + pulse_time
pulse_start_ind = np.argmin(np.abs(t-efield_t[0]))
if efield_t[0] < t[pulse_start_ind]:
pulse_start_ind -= 1
pulse_end_ind = np.argmin(np.abs(t-efield_t[-1]))
if efield_t[-1] > t[pulse_end_ind]:
pulse_end_ind += 1
t_slice = slice(pulse_start_ind,None,None)
t1_slice = slice(pulse_start_ind,pulse_end_ind,None)
u = self.undersample_factor
t2_slice = slice(pulse_end_ind,None,u)
t = self.t[t_slice] + pulse_time
t1 = self.t[t1_slice] + pulse_time
t2 = self.t[t2_slice] + pulse_time
rho1 = rho_in(t1)
rho2 = rho_in(t2)
rho1 *= np.exp(-1j * center * t1[np.newaxis,:])
rho2 *= np.exp(-1j * center * t2[np.newaxis,:])
# _u is an abbreviation for undersampled
t_u = np.hstack((t1,t2))
rho_u = np.hstack((rho1,rho2))
tb = time.time()
self.slicing_time += tb-t0
t0 = time.time()
exp_val_u = self.dipole_down(rho_u,rho_in.manifold_key,pulse_number = pulse_number,
ket_flag = ket_flag)
tb = time.time()
self.expectation_time += tb-t0
t0 = time.time()
# Interpolate expectation value back onto the full t-grid
if u != 1:
# Often must extrapolate the final point
exp_val_interp = scipy.interpolate.interp1d(t_u,exp_val_u,kind='cubic',fill_value='extrapolate')
exp_val = exp_val_interp(t)
else:
exp_val = exp_val_u
# print(exp_val.size/exp_val_u.size)
tb = time.time()
self.interpolation_time += tb-t0
# Initialize return array with zeros
ret_val = np.zeros(self.t.size,dtype='complex')
# set non-zero values using t_slice
ret_val[pulse_start_ind:] = exp_val
return ret_val
def integrated_dipole_expectation(self,rho_in,*,ket_flag=True):
"""Computes the expectation value of the dipole operator"""
pulse_number = -1
pulse_time = self.pulse_times[pulse_number]
t = pulse_time + self.efield_times[pulse_number]
if ket_flag:
center = - self.centers[pulse_number]
else:
center = self.centers[pulse_number]
rho = rho_in(t)
rho_nonzero = rho_in.bool_mask
try:
ev = self.eigenvalues['all_manifolds'][rho_nonzero]
except KeyError:
ev = self.eigenvalues[rho_in.manifold_key][rho_nonzero]
rho = rho * np.exp((ev[:,np.newaxis] - 1j*center)*t)
rho_dict = {'bool_mask':rho_nonzero,'rho':rho,'manifold_key':rho_in.manifold_key}
t0 = time.time()
exp_val = self.dipole_down(rho_dict,pulse_number = pulse_number,
ket_flag = ket_flag)
tb = time.time()
self.expectation_time += tb-t0
return exp_val
def get_local_oscillator(self):
local_oscillator_number = -1
efield_t = self.efield_times[local_oscillator_number]
efield = self.efields[local_oscillator_number]
if efield_t.size == 1:
# Impulsive limit: delta in time is flat in frequency
efield_ft = np.ones(self.w.size)*efield
return efield_ft
e_dt = efield_t[1] - efield_t[0]
dt = self.t[1] - self.t[0]
if (np.isclose(e_dt,dt) and efield_t[-1] <= self.t[-1]):
full_efield = np.zeros(self.t.size,dtype='complex')
# the local oscillator sets the "zero" on the clock
pulse_time_ind = np.argmin(np.abs(self.t))
pulse_start_ind = pulse_time_ind - efield_t.size//2
pulse_end_ind = pulse_time_ind + efield_t.size//2 + efield_t.size%2
t_slice = slice(pulse_start_ind, pulse_end_ind,None)
full_efield[t_slice] = efield
efield_ft = fftshift(ifft(ifftshift(full_efield)))*full_efield.size * dt
else:
efield_ft = fftshift(ifft(ifftshift(efield))) * efield.size * e_dt
efield_w = fftshift(fftfreq(efield_t.size,d=e_dt)) * 2 * np.pi
fill_value = (efield_ft[0],efield_ft[-1])
f = sinterp1d(efield_w,efield_ft,fill_value = fill_value,
bounds_error=False,kind='quadratic')
efield_ft = f(self.w)
return efield_ft
def polarization_to_signal(self,P_of_t_in,*,
local_oscillator_number = -1,undersample_factor = 1):
"""This function generates a frequency-resolved signal from a polarization field
local_oscillator_number - usually the local oscillator will be the last pulse
in the list self.efields"""
undersample_slice = slice(None,None,undersample_factor)
P_of_t = P_of_t_in[undersample_slice].copy()
t = self.t[undersample_slice]
dt = t[1] - t[0]
pulse_time = self.pulse_times[local_oscillator_number]
efield_t = self.efield_times[local_oscillator_number]
pulse_time_ind = np.argmin(np.abs(self.t))
efield = self.get_local_oscillator()
halfway = self.w.size//2
pm = self.w.size//(2*undersample_factor)
efield_min_ind = halfway - pm
efield_max_ind = halfway + pm + self.w.size%2
efield = efield[efield_min_ind:efield_max_ind]
P_of_w = fftshift(ifft(ifftshift(P_of_t)))*len(P_of_t)*dt#/np.sqrt(2*np.pi)
signal = P_of_w * np.conjugate(efield)
if not self.return_complex_signal:
return np.imag(signal)
else:
return 1j*signal
def integrated_polarization_to_signal(self,P,*,
local_oscillator_number = -1):
"""This function generates a frequency-resolved signal from a polarization field
local_oscillator_number - usually the local oscillator will be the last pulse
in the list self.efields"""
efield_t = self.efield_times[local_oscillator_number]
efield = self.efields[local_oscillator_number]
signal = np.trapz(P * np.conjugate(efield),x=efield_t)
return np.imag(signal)
def save(self,file_name,pulse_delay_names,*,use_base_path=True):
if use_base_path:
file_name = os.path.join(self.base_path,file_name)
save_dict = {}
for name,delays in zip(pulse_delay_names,self.all_pulse_delays):
save_dict[name] = delays
if self.detection_type == 'polarization':
save_dict['wt'] = self.w
save_dict['signal'] = self.signal
save_dict['signal_calculation_time'] = self.calculation_time
| np.savez(file_name,**save_dict) | numpy.savez |
# Q.Liu 26.10.2017
# Multivariate gaussian classifier implementation
import numpy as np
class GaussianClassifier(object):
def __init__(self):
self.classes = None
self.num_classes = None
self.num_features = None
self.prior = None
self.class_means = None
self.class_covmatrix = None
self.confusion_matrix = None
self.clf_img = None
self.posterior_imgs = None
def train(self, features, mask):
self.classes = np.unique(mask)[1:]
self.num_classes = len(self.classes)
self.num_features = len(features)
self.prior = np.ones(self.num_classes)/float(self.num_classes)
self.class_means = np.zeros([self.num_classes,self.num_features])
self.class_covmatrix = np.zeros([self.num_classes,
self.num_features,
self.num_features])
for c in range(self.num_classes):
num_pixels = np.count_nonzero(mask == self.classes[c])
samples = np.zeros([self.num_features, num_pixels])
for i in range(self.num_features):
samples[i,:] = features[i][np.nonzero(mask == self.classes[c])]
self.class_means[c, :] = np.mean(samples,axis=1)
self.class_covmatrix[c,:,:] = np.cov(samples)
def discriminant(self, feature_vector):
np.seterr(divide='ignore', invalid='ignore')
disrims = np.zeros(self.num_classes)
log_post = np.zeros(self.num_classes)
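        # Per-class quadratic discriminant:
        #   g_c(x) = ln P(c) - 0.5*ln|Sigma_c| - 0.5*(x - mu_c)^T Sigma_c^{-1} (x - mu_c)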
for c in range(self.num_classes):
log_pc = np.log(self.prior[c])
det_covmatrix = np.linalg.det(self.class_covmatrix[c,:,:])
log_det = np.log(det_covmatrix)
mean_diff = feature_vector - self.class_means[c,:]
inv_cov = np.linalg.inv(self.class_covmatrix[c,:,:])
gauss_kernel = 1/2*np.dot(mean_diff,np.dot(inv_cov,mean_diff))
disrims[c] = log_pc - 1/2*log_det - gauss_kernel
            log_post[c] = disrims[c] - self.num_features/2*np.log(2*np.pi)  # d/2 * log(2*pi), with d = number of features
posteriors = np.exp(log_post)
posteriors /= | np.sum(posteriors) | numpy.sum |
# ------------------------------------------------------------------------------
# Copyright (c) ETRI. All rights reserved.
# Licensed under the BSD 3-Clause License.
# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.
# You can refer to details of AIR project at https://aiforrobots.github.io
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from scipy.signal import savgol_filter
import numpy as np
from scipy.stats import circvar
def normalize_skeleton(data, resize_factor=None):
def distance(x1, y1, x2, y2):
return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
anchor_pt = (data[1 * 2], data[1 * 2 + 1]) # neck
if resize_factor is None:
neck_height = float(abs(data[1] - data[1 * 2 + 1]))
shoulder_length = distance(data[1 * 2], data[1 * 2 + 1], data[2 * 2], data[2 * 2 + 1]) + \
distance(data[1 * 2], data[1 * 2 + 1], data[5 * 2], data[5 * 2 + 1])
resized_neck_height = neck_height / float(shoulder_length)
if resized_neck_height > 0.6:
resize_factor = shoulder_length * resized_neck_height / 0.6
else:
resize_factor = shoulder_length
normalized_data = data.copy()
for i in range(0, len(data), 2):
normalized_data[i] = (data[i] - anchor_pt[0]) / resize_factor
normalized_data[i + 1] = (data[i + 1] - anchor_pt[1]) / resize_factor
return normalized_data, resize_factor
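# After normalization the skeleton is in neck-centred coordinates, rescaled by
# (roughly) the summed shoulder lengths, so poses from different subjects and
# camera distances become directly comparable.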
class MotionPreprocessor:
def __init__(self, skeletons):
self.skeletons = np.array(skeletons)
self.filtering_message = "PASS"
def get(self):
assert (self.skeletons is not None)
# filtering
if self.has_missing_frames():
self.skeletons = []
self.filtering_message = "too many missing frames"
# fill missing joints
        if len(self.skeletons) > 0:
self.fill_missing_joints()
if self.skeletons is None or np.isnan(self.skeletons).any():
self.filtering_message = "failed to fill missing joints"
self.skeletons = []
# filtering
        if len(self.skeletons) > 0:
if self.is_static():
self.skeletons = []
self.filtering_message = "static motion"
elif self.has_jumping_joint():
self.skeletons = []
self.filtering_message = "jumping joint"
# preprocessing
        if len(self.skeletons) > 0:
self.smooth_motion()
is_side_view = False
self.skeletons = self.skeletons.tolist()
for i, frame in enumerate(self.skeletons):
del frame[2::3] # remove confidence values
self.skeletons[i], _ = normalize_skeleton(frame) # translate and scale
# assertion: missing joints
assert not np.isnan(self.skeletons[i]).any()
# side view check
if (self.skeletons[i][0] < min(self.skeletons[i][2 * 2],
self.skeletons[i][5 * 2]) or
self.skeletons[i][0] > max(self.skeletons[i][2 * 2],
self.skeletons[i][5 * 2])):
is_side_view = True
break
if len(self.skeletons) == 0 or is_side_view:
self.filtering_message = "sideview"
self.skeletons = []
return self.skeletons, self.filtering_message
def is_static(self, verbose=False):
def joint_angle(p1, p2, p3):
v1 = p1 - p2
v2 = p3 - p2
ang1 = np.arctan2(*v1[::-1])
ang2 = np.arctan2(*v2[::-1])
return | np.rad2deg((ang1 - ang2) % (2 * np.pi)) | numpy.rad2deg |
# Copyright (c) 2013 <NAME>, <EMAIL>.lucchese at gmail.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
import math
import numpy
import matplotlib
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from math2D import *
from target import Target
class TargetData():
def __init__(self, target_id, pos, vel, angle, area_id):
assert isinstance(pos, Point2D)
assert isinstance(vel, Point2D)
assert numeric_real(angle)
assert angle == normalize_angle(angle)
assert area_id
self.id = target_id
self.pos = pos
self.vel = vel
self.angle = angle
self.area_id = area_id
class Camera():
def __init__(self, orig, rot, radius, fullfov, camid="", bpoints=[], fovpoints=[]):
assert isinstance(orig, Point2D)
assert numeric_real(rot)
assert numeric_real(radius)
assert radius > 0.
assert numeric_real(fullfov)
assert fullfov > 0. and fullfov < math.pi
self._targetdata = {}
self._last_targetdata = {}
self._dt = None
self._orig = orig
self._rot = normalize_angle(rot)
self._rotl = normalize_angle(rot - fullfov/2.)
self._roth = normalize_angle(rot + fullfov/2.)
self._radius = radius
self._radius2 = radius**2
self._fullfov = normalize_angle(fullfov)
self._id = camid
self._bangles = []
self._bpoints = bpoints
self._compute_bangles(bpoints)
self._fovpoints = fovpoints
# plot primitives
self._fullcoverage_patch = self._coverage_patch(rot, fullfov, radius, (0.65,0.65,0.65))
self._patches = []
self._lines = []
self._badge = self._camera_badge()
#def _local_angle(self, p):
# compute the angle in local coordinates with respect to the camera
# depth axis
#return normalize_angle(self._angle(p) - self._rot)
def _angle(self, p):
assert isinstance(p, Point2D)
# compute the angle in global coordinates with respect to the camera
# depth axis
return Line2D(self._orig, p).angle()
def _append_bangle(self, angle):
assert angle == normalize_angle(angle)
# check if the bisecting line actually bisects the coverage area
if not normalized_angle_in_range(angle, self._rotl, self._roth):
return
bangles = self._bangles
bangles.append(angle)
# reorder the list of bangles to make detection easier later on
if self._rotl >=0 and self._roth <= 0:
# in this case we must sort positive and negative angles separately
posi = []
nega = []
for a in bangles:
if a >= 0:
posi.append(a)
else:
nega.append(a)
bangles = sorted(posi) + sorted(nega)
else:
bangles = sorted(bangles)
# purge bisecting lines with too similar angles
if 0:
done = False
while not done:
done = True
                for i in range(0, len(bangles)-1):
#print len(bangles), i
anglel = bangles[i]
angleh = bangles[i+1]
# pop bangles that are less than 2 degrees apart
union_rangeh = normalize_angle(anglel+math.pi/180.*2.)
#print self._id, anglel, union_rangeh, angleh, normalized_angle_in_range(angleh, anglel, union_rangeh)
if normalized_angle_in_range(angleh, anglel, union_rangeh):
#print "popping bangle at index", i+1
bangles.pop(i+1)
done = False
break
self._bangles = bangles
def _compute_bangles(self, bpoints):
if not bpoints:
return
for p in bpoints:
angle = self._angle(p)
self._append_bangle(angle)
def _camera_badge(self):
# shortcut into matplotlib namespace
Path = mpath.Path
badge_size = 0.3
points = numpy.array([
(-badge_size/2., -badge_size/2.),
( badge_size/2., -badge_size/2.),
( badge_size/2., badge_size/2.),
(-badge_size/2., badge_size/2.),
(-badge_size/2., -badge_size/2.),
])
# rotate the badge points and tranlate them to the camera origin
verts = transform_points(points, self._rot, self._orig.array())
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
#return mpatches.PathPatch(mpath.Path(verts, codes), alpha=1, facecolor=(0.5,0.5,0.75))
#print self._orig.tuple()
return matplotlib.patches.Circle(self._orig.tuple(), radius=.2, alpha=1, facecolor=(1,1,1))
def _compute_blines(self):
# create the bisecting lines
self._lines = []
clr_blines = (1,0,0)
for angle in self._bangles:
p1 = self._orig
p2 = Point2D(self._radius, 0)
p2 = rotate_point(p2, angle)
p2.x += p1.x
p2.y += p1.y
if 0:
self._lines.append(matplotlib.lines.Line2D([p1.x, p2.x], [p1.y, p2.y], color=clr_blines, linestyle=':'))
def _update_patches(self):
# namespace shortcut for the codes below
Path = mpath.Path
# build the camera badge patch
badge_size = 0.3
points = numpy.array([
(-badge_size/2., -badge_size/2.),
( badge_size/2., -badge_size/2.),
( badge_size/2., badge_size/2.),
(-badge_size/2., badge_size/2.),
(-badge_size/2., -badge_size/2.),
])
# rotate the badge points and translate them to the camera origin
verts = transform_points(points, self._rot, self._orig.array())
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
self._badge_patch = mpatches.PathPatch(mpath.Path(verts, codes), alpha=1, facecolor=(0.5,0.5,0.75))
self._badge_patch = matplotlib.patches.Circle(self._orig.tuple(), radius=.2, alpha=1, facecolor=(1,0.75,0.75))
# build the camera coverage patch
r = self._radius
w = self._fullfov
nr_segments = 20
points = [(0,0)]
for i in numpy.arange(0, nr_segments+1):
theta = -w/2. + i*w/float(nr_segments)
points.append((math.cos(theta), math.sin(theta)))
points.append((0,0))
points = numpy.array(points)
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 10:53:11 2020
@author: giamm
"""
import numpy as np
import matplotlib.pyplot as plt
import random
# import math
import csv
from pathlib import Path
import datareader
# import parameters_input as inp
import plot_generator as plot
from house_load_profiler import house_load_profiler as hlp
from load_profile_aggregator_trapz import aggregator
from tictoc import tic, toc
###############################################################################
# This is the _main_ file of a routine that generates the electric load-profile
# for an aggregate of a certain number of households. The total simulation time
# is 1440 min, while the resolution (timestep) is 1 min.
# The load profile and the daily energy consumption are evaluated for a number
# of households and appliances, according to the availability of each appliance
# in each household (depending on a distribution factor and on the geographical
# location that is chosen). The load profile from all the households is then
# aggregated and the result is shown with a different time resolution (dt_aggr).
# This is done for each season, both for weekdays and weekend days. The energy
# consumption from each appliance during the whole year is also evaluated.
###############################################################################
# The basepath of the file is stored in a variable
basepath = Path(__file__).parent
## Parameters needed in the simulation
# Simulation parameters that can be changed by keyboard input
# n_hh #number of households (-)
# n_people_avg #average number of members for each household (-)
# ftg_avg #average footage of each household (m2)
# location #geographical location: 'north' | 'centre' | 'south'
# power_max #maximum power available from the grid (contractual power) (W)
# en_class #energetic class of the appliances: 'A+++' | 'A++' | 'A+' | 'A' | 'B' | 'C' | 'D'
# dt_aggr #aggregated data timestep (min) 15 | 30 | 60
# Parameters that are to be changed manually
# toll #tolerance on the displacement of the appliance's daily time-on, i.e. duration (%)
# devsta #standard deviation of the appliance's daily time-on, i.e. duration (min)
# q_max #quantile for the maximum instantaneous load profile (%)
# q_med #quantile for the medium instantaneous load profile (%)
# q_min #quantile for the minimum instantaneous load profile (%)
# time_scale #time-scale for plotting: 'min' | 'h'
# power_scale #power-scale for plotting: 'W' | 'kW' | 'MW'
# energy_scale #energy-scale for plotting: 'kWh' | 'MWh'
def aggregate_load_profiler(params, file_store_flag, fig_store_flag):
## Parameters
# # Updating the parameters according to the keyboard input by calling the parameters_input() method
# params = inp.parameters_input()
# Some more "insiders" paramters that are to be changed manually
params['devsta'] = 2
params['toll'] = 15
params['q_max'] = 80
params['q_med'] = 50
params['q_min'] = 20
# Updating the parameters' values (only for those parameters that are needed here)
# Number of households considered (-)
n_hh = params['n_hh']
# Geographical location: 'north' | 'centre' | 'south'
location = params['location']
# Energetic class of the appliances: 'A+++' | 'A++' | 'A+' | 'A' | 'B' | 'C' | 'D'
en_class = params['en_class']
# Time-step used to aggregate the results (min): 1 | 5 | 10 | 15 | 30 | 45 | 60
dt_aggr = params['dt_aggr']
# Quantile for the evaluation of maximum, medium and minimum power demands at each time-step
q_max = params['q_max']
q_med = params['q_med']
q_min = params['q_min']
### Starting the simulation
## Time
# Time discretization for the simulation
# Time-step (min)
dt = 1
# Total time of simulation (min)
time = 1440
# Vector of time from 00:00 to 23:59 (one day) (min)
time_sim = np.arange(0, time, dt)
# Time vector for the aggregation of the results (min)
time_aggr = np.arange(0, time, dt_aggr)
# Creating a dictionary to be passed to the various methods, containing the time discretization
time_dict = {
'time': time,
'dt': dt,
'time_sim': time_sim,
}
## Input data for the appliances
# Appliances' attributes, energy consumptions and user's coefficients
apps, apps_ID, apps_attr = datareader.read_appliances('eltdome_report.csv', ';', 'Input')
# apps is a 2d-array in which, for each appliance (rows), an attribute value is given (columns)
# apps_ID is a dictionary in which, for each appliance (key), its ID number, type, weekly and seasonal behavior are given (value)
# apps_attr is a dictionary in which the name of each attribute (value) is linked to its column number in apps (key)
ec_yearly_energy, ec_levels_dict = datareader.read_enclasses('classenerg_report.csv', ';', 'Input')
# ec_yearly_energy is a 2d-array in which for each appliance, its yearly energy consumption is given for each energetic class
# ec_levels_dict is a dictionary that links each energetic level (value) to its column number in ec_yearly_energy
coeff_matrix, seasons_dict = datareader.read_enclasses('coeff_matrix.csv',';','Input')
# coeff_matrix is a 2d-array in which for each appliance, its coefficient k, related to user's behaviour in different seasons, is given
# seasons_dict is a dictionary that links each season (value) to its column number in coeff_matrix
# Average daily load profiles (from MICENE, REMODECE) and typical duty cycle diagrams (from CESI)
# Rather than loading the proper file each time the load_profiler method is called, all the input data are loaded here
# and stored in two dicionaries (one for the average daily load profiles and one for the typical duty cycle diagrams)
# that are passed each time to the functions
apps_avg_lps = {}
apps_dcs = {}
for app in apps_ID:
# Storing some useful variables needed to identify the file to be loaded and read
# app_nickname is a 2 or 3 characters string identifying the appliance
app_nickname = apps_ID[app][apps_attr['nickname']]
# app_type depends on the work cycle of the appliance: 'continuous'|'no_duty_cycle'|'duty_cycle'
app_type = apps_ID[app][apps_attr['type']]
# app_wbe (weekly behavior), different usage of the appliance in each type of days: 'wde'|'we','wd'
app_wbe = apps_ID[app][apps_attr['week_behaviour']]
# app_sbe (seasonal behavior), different usage of the appliance in each season: 'sawp'|'s','w','ap'
app_sbe = apps_ID[app][apps_attr['season_behaviour']]
# Building the name of the file to be opened and read
fname_nickname = app_nickname
fname_type = 'avg_loadprof'
# Initializing the dictionary (value) related to the current appliance - app (key)
apps_avg_lps[app] = {}
# Running through different seasons if the appliance's usage changes according to the season
# if app_sbe == 'sawp' there will only be one iteration
for season in app_sbe:
fname_season = season
# Running through different day-types if the appliance's usage changes according to the day-type
# if app_wbe == 'wde' there will only be one iteration
for day in app_wbe:
fname_day = day
filename = '{}_{}_{}_{}.csv'.format(fname_type, fname_nickname, fname_day, fname_season)
# Reading the time and power vectors for the load profile
data_lp = datareader.read_general(filename,';','Input')
# Time is stored in hours and converted to minutes
time_lp = data_lp[:, 0]
time_lp = time_lp*60
# Power is already stored in Watts, it corresponds to the load profile
power_lp = data_lp[:, 1]
load_profile = power_lp
# Interpolating the load profile if it has a different time-resolution
if (time_lp[-1] - time_lp[0])/(np.size(time_lp) - 1) != dt:
load_profile = np.interp(time_sim, time_lp, power_lp, period = time)
# Storing the load profile in the proper element of the dictionary
apps_avg_lps[app][(season, day)] = load_profile
# Loading the duty cycle diagram for the appliance of "duty-cycle" type
if app_type == 'duty_cycle':
fname_type = 'dutycycle'
filename = '{}_{}.csv'.format(fname_type, fname_nickname)
# Reading the time and power vectors for the duty cycle
data_dc = datareader.read_general(filename, ';', 'Input')
# Time is already stored in minutes
time_dc = data_dc[:, 0]
# Power is already stored in Watts, it corresponds to the duty cycle
power_dc = data_dc[:, 1]
duty_cycle = power_dc
# Interpolating the duty-cycle, if it has a different time resolution
if (time_dc[-1] - time_dc[0])/(np.size(time_dc) - 1) != dt:
time_dc = np.arange(time_dc[0], time_dc[-1] + dt, dt)
duty_cycle = np.interp(time_dc, data_dc[:, 0], power_dc)
# Storing time and power vectors of the duty cycle
apps_dcs[app] = {'time_dc': time_dc,
'duty_cycle': duty_cycle}
# Creating a dictionary to pass such data to the various methods
appliances_data = {
'apps': apps,
'apps_ID': apps_ID,
'apps_attr': apps_attr,
'ec_yearly_energy': ec_yearly_energy,
'ec_levels_dict': ec_levels_dict,
'coeff_matrix': coeff_matrix,
'seasons_dict': seasons_dict,
'apps_avg_lps': apps_avg_lps,
'apps_dcs': apps_dcs,
}
## Building the appliances availability matrix
# A 2d-array is built in which, for each household (columns) it is
# shown which appliances are available for the household, according to the
# distribution factor of each appliance in a given location (1 if available, 0 otherwise)
# Initializing the array
apps_availability = np.zeros((len(apps_ID),n_hh))
number_of_apps = np.zeros((len(apps_ID)))
# A dictionary that relates each location (key) to the related columns in apps(for distribution factors)
location_dict = {
'north': apps_attr['distribution_north'] - (len(apps_attr) - np.size(apps, 1)),
'centre': apps_attr['distribution_centre'] - (len(apps_attr) - np.size(apps, 1)),
'south': apps_attr['distribution_south'] - (len(apps_attr) - np.size(apps, 1)),
}
# Building the matrix
for app in apps_ID:
# The ID number of the appliance is stored in a variable since it will be used many times
app_ID = apps_ID[app][apps_attr['id_number']]
# Extracting the distribution factor for the appliance in the current geographical location
distr_fact = apps[app_ID, location_dict[location]]
# Evaluating the number of households in which the appliance is available
n_apps_app_type = int(np.round(n_hh*distr_fact))
number_of_apps[app_ID] = n_apps_app_type
# Extracting randomly the households in which the appliance is available, from the total number of households
samp = random.sample(list(range(0, n_hh)), n_apps_app_type)
# Assigning the appliance's availability to the households present in the sample
apps_availability[apps_ID[app][0],samp] = 1
## Building seasons and week dictionaries
# This is done in order to explore all the seasons and, for each season two types of days (weekday and weekend)
seasons = {'winter': (0, 'w'), 'spring': (1, 'ap'), 'summer': (2, 's'), 'autumn': (3, 'ap')}
days = {'week-day': (0, 'wd'), 'weekend-day': (1, 'we')}
# A reference year is considered, in which the first day (01/01) is a monday.
# Therefore, conventionally considering that winter lasts from 21/12 to 20/03,
# spring from 21/03 to 20/06, summer from 21/06 to 20/09 and autumn from 21/09
# to 20/12, each season has the following number of weekdays and weekend days.
days_distr = {'winter': {'week-day': 64, 'weekend-day': 26},
'spring': {'week-day': 66, 'weekend-day': 26},
'summer': {'week-day': 66, 'weekend-day': 26},
'autumn': {'week-day': 65, 'weekend-day': 26}
}
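# Quick consistency check (illustrative): the week-day and weekend-day counts
# over the four seasons must add up to one full reference year, e.g.
# assert sum(days_distr[s][d] for s in days_distr for d in days_distr[s]) == 365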
### Evaluating the load profiles for all the households
# The aggregated load profiles are evaluated for both a week day and a weekend
# day for the four seasons and the seasonal energy consumption from each
# appliance, for each household are evaluated.
# First, some quantities are initialized, that will be useful for storing the results
# Quantiles are evaluated for the load profiles. It means that for each timestep
# the maximum power (quantile q_max), the median power (quantile q_med) and the
# minimum power (quantile q_min) demanded by the households are evaluated.
nmax = min(int(np.round(n_hh*q_max/100)), n_hh)
nmed = int(np.round(n_hh*q_med/100))
nmin = int(np.round(n_hh*q_min/100))
# A random sample of n_samp houses is also extracted in order to plot, for each
# of them, the load profile during one day for each season, for each day-type.
n_samp = 5
if n_hh < n_samp: n_samp = n_hh
# A random sample is extracted from the total number of households
samp = random.sample(list(range(0, n_hh)), n_samp)
# A list where to store the header for a .csv file is initialized
sample_lp_header = samp
# Storing some useful quantities into variables
n_seasons = len(seasons)
n_days = len(days)
n_apps = len(apps_ID)
n_time_sim = np.size(time_sim)
n_time_aggr = np.size(time_aggr)
# Specifying which quantities (load profiles) are going to be stored and plotted
load_profiles_types = {
0: 'Total',
1: 'Average',
2: 'Maximum',
3: 'Medium',
4: 'Minimum',
}
lps_header = ['{} load (W)'.format(load_profiles_types[lp]) for lp in load_profiles_types]
# Creating 3d-arrays where to store each type of load profile, for each season (axis = 0),
# each time-step (axis = 1) and type of day (axis = 2)
lp_tot_stor = np.zeros((n_seasons, n_time_aggr, n_days))
lp_avg_stor = np.zeros((n_seasons, n_time_aggr, n_days))
lp_max_stor = np.zeros((n_seasons, n_time_aggr, n_days))
lp_med_stor = | np.zeros((n_seasons, n_time_aggr, n_days)) | numpy.zeros |
import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data.dataloader as dataloader
from torch.utils.data import Subset,Dataset
import torch.nn as nn
import torch.optim as optim
from torch.nn.parameter import Parameter
import numpy as np
import network as net
from random import shuffle
import random
import data
import os
import config as cfg
import time
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
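# setup_seed above fixes every RNG seed (PyTorch CPU/GPU, NumPy, random) and
# forces deterministic cuDNN kernels, which makes runs reproducible at the
# cost of some GPU throughput.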
class LoadData(Dataset):
def __init__(self,train_x,train_y):
self.train_x = train_x
self.train_y = train_y
self.len = len(self.train_x)
def __getitem__(self,index):
return self.train_x[index],self.train_y[index]
def __len__(self):
return self.len
def train_and_test(class_name,train_loader,test_loader,num_classes,length):
epoches = 500 # number of training epochs
lr = 0.0001 # learning rate
input_num = 1
output_num = num_classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = net.BasicFCN(input_num,num_classes,length)
model.to(device)
loss_func = nn.CrossEntropyLoss() # cross-entropy classification loss
optimizer = optim.Adam(model.parameters(), lr=lr) # Adam optimizer
#optimizer = optim.SGD(model.parameters(), lr=lr)
SOTA = 0.0
model_Tstate = None
for epoch in range(epoches):
for images, labels in train_loader:
images = images.to(device)
labels = labels.to(device)
output = model(images)
loss = loss_func(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
correct = 0
total = 0
model.eval()
with torch.no_grad():
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
output = model(images)
values, predicte = torch.max(output, 1)
total += labels.size(0)
correct += (predicte == labels).sum().item()
if (correct/total) > SOTA:
SOTA = correct / total
#print("The {} accuracy of epoch {} TSC: {}%".format(class_name,epoch+1, 100 * correct / total))
#torch.save(model.state_dict(),"FedTemp/"+class_name+".pkl")
model_Tstate = model
return str(SOTA), model_Tstate
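# The transfer-learning variant below differs from train_and_test only in the
# network construction: it builds net.OrdinaryTransferFCN from a previously
# trained model (PreviousModel) instead of a fresh net.BasicFCN, while the
# training and evaluation loop stays the same.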
def train_and_test_load(class_name,train_loader,test_loader,num_classes,length,PreviousModel):
epoches = 500 # number of training epochs
lr = 0.0001 # learning rate
input_num = 1
output_num = num_classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = net.OrdinaryTransferFCN(input_num,num_classes,length,PreviousModel)
model.to(device)
loss_func = nn.CrossEntropyLoss() # cross-entropy classification loss
optimizer = optim.Adam(model.parameters(), lr=lr) # Adam optimizer
#optimizer = optim.SGD(model.parameters(), lr=lr)
SOTA = 0.0
model_Rstate = None
for epoch in range(epoches):
for images, labels in train_loader:
images = images.to(device)
labels = labels.to(device)
output = model(images)
loss = loss_func(output, labels) #+ 1*net.GetWCLoss(PreviousModel,model)
optimizer.zero_grad()
loss.backward()
optimizer.step()
correct = 0
total = 0
model.eval()
with torch.no_grad():
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
output = model(images)
values, predicte = torch.max(output, 1)
total += labels.size(0)
correct += (predicte == labels).sum().item()
if (correct/total) > SOTA:
SOTA = correct / total
#print("The {} accuracy of epoch {} TSC: {}%".format(class_name,epoch+1, 100 * correct / total))
#torch.save(model.state_dict(),"basicFCN/"+class_name+".pkl")
model_Rstate = model
return str(SOTA), model_Rstate
setup_seed(123)
names = cfg.each_elen_dir_name
start_time = time.time()
numbers = len(names)
logTxt = "ContinalLearning.txt"
f = open(logTxt,mode="a+")
f.writelines("Ordinary Transfer Learning Algorithms------------1\n")
f.close()
avg = 0
for i in range(len(names)):
logTxt = "ContinalLearning.txt"
f = open(logTxt,mode="a+")
classname = names[i]
x_train = | np.load("data/"+classname+"_Xtrain.npy") | numpy.load |
# from __future__ import division
#-------------------------------------
#
# Started at 06/08/2018 (YuE)
#
# This script is based on the previous script
# threeApproachesComparison_v6.py
#
## Upgraded version of python (python3.4): the script was rewritten to take into
# account some differences in the descriptions and usage of some functions
# (version cma_v3 and earlier scripts are written under python2).
#
# 07/24/2018: IT IS NOT FINISHED:
#
# Which are still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
#
# But nevertheless, the dependences of the transmitted energy on the impact
# parameter are close to the inverse quadratic (as it should be!) at all velocities.
#
# 07/27/2018: IT IS NOT FINISHED:
#
# The following are still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
# The investigation of that is in progress.
#
# Some features were improved, some figures were corrected.
#
#-------------------------------------
#========================================================
#
# This code compares two approaches: "classical" (from [1]) and
# "magnus" (from [2]).
#
# For "classical" approach the magnetized interaction between ion
# and electron is considered for ion velocities V_i > rmsTrnsvVe.
#
# References:
#
# [1] <NAME>, <NAME>, <NAME>, <NAME>.
# "Physics guide of BETACOOL code. Version 1.1". C-A/AP/#262, November
# 2006, Brookhaven National Laboratory, Upton, NY 11973.
# [2] <NAME>, <NAME>. "New Algorithm for Dynamical Friction
# of Ions in a Magnetized Electron Beam". AIP Conf. Proc. 1812, 05006 (2017).
#
#========================================================
#########################################################
#
# Main issues of the calculations:
#
# 1) Friction force (FF) is calculated in the (P)article (R)est (F)rame,
# i.e. in the frame moving together with both (cooled and cooling)
# beams at a velocity V0;
# 2) Friction force is calculated for each value of ion velocity
# in the interval from .1*rmsTrnsvVe till 10*rmsTrnsvVe;
# 3) Initially it is assumed that all electrons have a longitudinal
# velocity rmsLongVe and a transversal velocity rmsTrnsvVe;
# 4) For each ion velocity the minimal and maximal values of the
# impact parameter are defined. The radius of the shielding of the
# ion's electric field equals the value of the maximal
# impact parameter;
# 5) For each impact parameter in the interval from the minimal to the
# maximal value the transferred momenta deltap_x,y,z are
# calculated;
# 6) The computed transferred momenta allow one to calculate the transferred
# energy delta_E =deltap^2/(2*m_e) and to integrate it over the
# impact parameter; then (expressions (3.4), (3.5) from [1]):
# FF =-2*pi*n_e*integral_rhoMin^rhoMax delta_E*rho*drho;
# 7) To take into account the velocity distribution of the
# electrons it is necessary to repeat these calculations for
# each value of the electron's velocity and then to integrate the
# result over the distribution of the velocities.
#
# 10/26/2018:
#
# 8) Item 6 is wrong; the correct expression for the transferred
# energy delta_E will be used;
# 9) My own Least Squares Method (LSM) is used to fit the
# dependence of the transferred momenta on the impact parameter;
#
#
# 11/08/2018:
#
# 10) Two functions ('fitting' and 'errFitAB') are defined to realize
# my LSM: they find the fit parameters and the error of this
# fitting;
#
# 11) Analysis of different dependences between values; graphical
# presentation of these dependences;
#
#########################################################
import os, sys
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
from matplotlib import ticker
from matplotlib import markers
import matplotlib as mpl
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import scipy.integrate as integrate
from scipy.integrate import quad, nquad, dblquad
from scipy.constants import pi
from scipy import optimize
from statistics import mean
from array import array
#
# All physical constants have its dimension in units in the system CI.
# This code uses units in the system CGS!
#
from scipy.constants import speed_of_light as clight
from scipy.constants import epsilon_0 as eps0
from scipy.constants import mu_0 as mu0
from scipy.constants import elementary_charge as qe
from scipy.constants import electron_mass as me
from scipy.constants import proton_mass as mp
from scipy.constants import Boltzmann as kB
pi=3.14159265359
#
# Physical constants:
#
m_e=9.10938356e-28 # electron mass, g
m_elec=m_e # to keep variable from previous script
m_p=1.672621898e-24 # proton mass, g
M_ion = m_p # to keep variable from previous script
q_e=4.803204673e-10 # electron charge, CGSE unit: sqrt(g*cm^3/sec^2)
q_elec=q_e # to keep variable from previous script
Z_ion = q_e # to keep variable from previous script
cLight=2.99792458e10 # speed of light, cm/sec
eVtoErg=1.6021766208e-12 # 1 eV = 1.6...e-12 erg
CtoPart=2.99792458e9 # 1 C = 1 A*sec = 2.9...e9 particles
m_e_eV = m_e*cLight**2/eVtoErg
#
# Electron beam parameters:
#
Ekin=3.0e4 # kinetic energy, eV
curBeam=0.5 # current density, A/cm^2
dBeam=3.0 # beam diameter, cm
angSpread=3.0 # angular spread, mrad
trnsvT=0.5 # transversal temperature, eV
longT=2.0e-4 # longitudinal temperature, eV (was 2.0e-4)
nField=1 # number ov values of the magnetic field
fieldB=np.zeros(nField) # magnetic field
fieldB[0]=3.e3 # Gs
omega_p=1.0e9 # plasma frequency, 1/sec
n_e=omega_p**2*m_e/(4.*pi*q_e**2) # plasma density, 3.1421e+08 cm-3
n_e1=8.e7 # plasma density, cm-3
omega_p1=np.sqrt(4.*pi*n_e1*q_e**2/m_e) # plasma frequency, 5.0459e+08 1/s
#
# Cooling system parameter:
#
coolLength=150.0 # typical length of the coolong section, cm
#
# HESR:
#
Ekin=90.8e4 # HESR kinetic energy, eV
curBeam=0.5 # HESR current beam, A
dBeam=2.0 # HESR beam diameter, cm
angSpread=0.0 # HESR angular spread, mrad
trnsvT=0.2 # HESR transversal temperature, eV
longT=1.0e-2 # HESR longitudinal temperature, eV (was 2.0e-4)
fieldB[0]=1.e3 # HESR, Gs
coolLength=270.0 # HESR typical length of the cooling section, cm
#
# EIC:
#
angSpread=0.0 # EIC angular spread, mrad
fieldB[0]=5.e4 # EIC, Gs
coolLength=300.0 # EIC typical length of the cooling section, cm
#
# Calculated parameters of the electron beam:
#
V0 = cLight*np.sqrt(Ekin/m_e_eV*(Ekin/m_e_eV+2.))/(Ekin/m_e_eV+1.)
print ('V0 =%e' % V0)
tetaV0=0. # angle between V0 and magnetic field, rad
B_mag=fieldB[0]*np.cos(tetaV0) # magnetic field acting on an electron, Gs
rmsTrnsvVe=np.sqrt(2.*trnsvT*eVtoErg/m_e) # RMS transversal velocity, cm/s
rmsLongVe=np.sqrt(2.*longT*eVtoErg/m_e) # RMS longitudinal velocity, cm/s
# HESR:
dens=curBeam*(CtoPart/q_e)/(pi*(.5*dBeam)**2*V0) # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('HESR: dens = %e,omega_p = %e' % (dens,omega_p))
# EIC:
rmsLongVe = 1.0e+7 # cm/s
longT = .5*m_e*rmsLongVe**2/eVtoErg
rmsTrnsvVe = 4.2e+7 # cm/s
trnsvT = .5*m_e*rmsTrnsvVe**2/eVtoErg
print ('EIC: rmsLongVe = %e, longT = %e, rmsTrnsvVe = %e, trnsvT = %e' % \
(rmsLongVe,longT,rmsTrnsvVe,trnsvT))
dens=2.e9 # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('EIC: dens = %e,omega_p = %e' % (dens,omega_p))
cyclFreq=q_e*B_mag/(m_e*cLight) # cyclotron frequency, 1/s
rmsRoLarm=rmsTrnsvVe*cyclFreq**(-1) # RMS Larmor radius, cm
dens=omega_p**2*m_e/(4.*pi*q_e**2) # density, 1/cm^3
likeDebyeR=(3./dens)**(1./3.) # "Debye" sphere with 3 electrons, cm
eTempTran=trnsvT # to keep variable from previous script
eTempLong=longT # to keep variable from previous script
coolPassTime=coolLength/V0 # time to pass through the cooling section, sec
thetaVi=0. # polar angle between ion and cooled electron beams, rad
phiVi=0. # azimuth angle between ion and cooled electron beams, rad
powV0=round(np.log10(V0))
mantV0=V0/(10**powV0)
pow_n_e=round(np.log10(n_e))
mant_n_e=n_e/(10**pow_n_e)
#
# Formfactor ffForm for friction force:
#
# ffForm = 2*pi*dens*q_e**4/(m_e*V0**2)=
# = 0.5*omega_p**2*q_e**2/V0**2
#
# Dimension of ffForm is force: g*cm/sec**2=erg/cm
#
# 1 MeV/m = 1.e6*eVtoErg/100. g*cm/sec**2 = 1.e4*eVtoErg erg/cm
MeV_mToErg_cm=1.e4*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/MeV_mToErg_cm # MeV/m
eV_mToErg_m=100.*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/eV_mToErg_m # =-6.8226e-12 eV/m
eV_mInErg_cm=100.*eVtoErg
ffForm=-.5*omega_p**2*q_e**2/V0**2/eVtoErg # =-6.8226e-10 eV/cm
ffForm=100.*ffForm # =-6.8226e-08 eV/m
ergToEV = 1./1.60218e-12
#
# Relative velocities of electrons:
#
relVeTrnsv=rmsTrnsvVe/V0
relVeLong=rmsLongVe/V0
print ('V0=%e cm/s, rmsTrnsvVe=%e cm/s (rel = %e), rmsLongVe=%e cm/s (rel = %e)' % \
(V0,rmsTrnsvVe,relVeTrnsv,rmsLongVe,relVeLong))
# Indices:
(Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6)
stepsNumberOnGyro = 25 # number of the steps on each Larmour period
'''
#
# Opening the input file:
#
inputFile='areaOfImpactParameter_tAC-v6_fig110.data'
print ('Open input file "%s"...' % inputFile)
inpfileFlag=0
try:
inpfile = open(inputFile,'r')
inpfileFlag=1
except:
print ('Problem to open input file "%s"' % inputFile)
if inpfileFlag == 1:
print ('No problem to open input file "%s"' % inputFile)
lines=0 # Number of current line from input file
dataNumber=0 # Number of current value of any types of Data
xAboundary=np.zeros(100)
xBboundary=np.zeros(100)
while True:
lineData=inpfile.readline()
# print ('line=%d: %s' % (lines,lineData))
if not lineData:
break
lines += 1
if lines > 4:
words=lineData.split()
nWords=len(words)
# print ('Data from %d: words=%s, number of entries = %d' % (lines,words,nWords))
xAboundary[dataNumber]=float(words[0])
xBboundary[dataNumber]=float(words[1])
dataNumber += 1
inpfile.close()
print ('Close input file "%s"' % inputFile)
'''
#====================================================================
#
#------------------ Begin of defined functions -----------------------
#
# Larmor frequency electron:
#
def omega_Larmor(mass,B_mag):
return (q_elec)*B_mag/(mass*clight*1.e+2) # rad/sec
#
# Derived quantities:
#
omega_L = omega_Larmor(m_elec,B_mag) # rad/sec
T_larm = 2*pi/omega_L # sec
timeStep = T_larm/stepsNumberOnGyro # time step, sec
print ('omega_Larmor= %e rad/sec, T_larm = %e sec, timeStep = %e sec' % \
(omega_L,T_larm,timeStep))
nLarmorAvrgng=10 # number of averaged Larmor rotations
#
# Data to integrate transferred momenta over the track:
#
timeStep_c=nLarmorAvrgng*stepsNumberOnGyro*timeStep # sec
print ('timeStep_c = %e s' % timeStep_c)
eVrmsTran = np.sqrt(2.*eTempTran*eVtoErg/m_elec) # cm/sec
eVrmsLong = np.sqrt(2.*eTempLong*eVtoErg/m_elec) # cm/sec
kinEnergy = m_elec*(eVrmsTran**2+eVrmsLong**2)/2. # kinetic energy; erg
print ('eVrmsTran = %e cm/sec, eVrmsLong = %e cm/sec, kinEnergy = %e eV' % \
(eVrmsTran,eVrmsLong,ergToEV*kinEnergy))
ro_larmRMS = eVrmsTran/omega_L # cm
print ('ro_larmRMS =%e mkm' % (1.e4*ro_larmRMS))
#
# Electrons are magnetized for impact parameter >> rhoCrit:
#
rhoCrit=math.pow(q_elec**2/(m_elec*omega_L**2),1./3) # cm
print ('rhoCrit (mkm) = ' , 1.e+4*rhoCrit)
#
# Conversion from the 6-vector of the electron's "coordinates" to the 6-vector
# of guiding-center coordinates:
# z_e=(x_e,px_e,y_e,py_e,z_e,pz_e) --> zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e);
#
def toGuidingCenter(z_e):
mOmega=m_elec*omega_L # g/sec
zgc_e=z_e.copy() # 6-vector
zgc_e[Ix] = np.arctan2(z_e[Ipx]+mOmega*z_e[Iy],z_e[Ipy]) # radians
zgc_e[Ipx]= (((z_e[Ipx]+mOmega*z_e[Iy])**2+z_e[Ipy]**2)/(2.*mOmega)) # g*cm**2/sec
zgc_e[Iy] =-z_e[Ipx]/mOmega # cm
zgc_e[Ipy]= z_e[Ipy]+mOmega*z_e[Ix] # g/sec
return zgc_e
#
# Conversion from the 6-vector of guiding-center coordinates to the 6-vector
# of electron's "coordinates":
# zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e) --> z_e=(x_e,px_e,y_e,py_e,z_e,pz_e);
#
def fromGuidingCenter(zgc_e):
mOmega=m_elec*omega_L # g/sec
rho_larm=np.sqrt(2.*zgc_e[Ipx]/mOmega) # cm
z_e = zgc_e.copy() # 6-vector
z_e[Ix] = zgc_e[Ipy]/mOmega-rho_larm*np.cos(zgc_e[Ix]) # cm
z_e[Ipx]=-mOmega*zgc_e[Iy] # g*cm/sec
z_e[Iy] = zgc_e[Iy]+rho_larm*np.sin(zgc_e[Ix]) # cm
z_e[Ipy]= mOmega*rho_larm*np.cos(zgc_e[Ix]) # g*cm/sec
return z_e
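#
# Sanity check of the two conversions above (illustrative sketch, guarded by
# 'if 0:' like the other test blocks in this script; 'zTest6' is a
# hypothetical 6-vector, not part of the simulation): the round trip
# z_e -> zgc_e -> z_e must reproduce the initial vector:
#
if 0:
    zTest6 = np.array([1.e-4,m_elec*eVrmsTran,2.e-4, \
                       .5*m_elec*eVrmsTran,0.,m_elec*eVrmsLong])
    zRoundTrip = fromGuidingCenter(toGuidingCenter(zTest6))
    print ('Max. round-trip deviation = %e' % np.max(np.abs(zRoundTrip-zTest6)))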
#
# Matrix to drag the electron through the solenoid with field 'B_mag'
# during time interval 'deltaT':
#
def solenoid_eMatrix(B_mag,deltaT):
slndMtrx=np.identity(6)
omega_L=omega_Larmor(m_elec,B_mag) # rad/sec
mOmega= m_elec*omega_L # g/sec
phi=omega_L*deltaT # phase, rad
cosPhi=math.cos(phi) # dimensionless
sinPhi=math.sin(phi) # dimensionless
cosPhi_1=2.*math.sin(phi/2.)**2 # dimensionless
slndMtrx[Iy, Iy ]= cosPhi # dimensionless
slndMtrx[Ipy,Ipy]= cosPhi # dimensionless
slndMtrx[Iy, Ipy]= sinPhi/mOmega # sec/g
slndMtrx[Ipy,Iy ]=-mOmega*sinPhi # g/sec
slndMtrx[Iz, Ipz]= deltaT/m_elec # sec/g
slndMtrx[Ix, Ipx]= sinPhi/mOmega # sec/g
slndMtrx[Ix, Iy ]= sinPhi # dimensionless
slndMtrx[Ix, Ipy]= cosPhi_1/mOmega # sec/g
slndMtrx[Iy, Ipx]=-cosPhi_1/mOmega # sec/g
slndMtrx[Ipy,Ipx]=-sinPhi # dimensionless
return slndMtrx
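#
# Illustrative sanity check of the solenoid matrix (guarded by 'if 0:';
# 'zTestSol' is a hypothetical test vector): after one full Larmor period
# T_larm the transverse coordinates of an electron must return to their
# initial values, while z advances by eVrmsLong*T_larm:
#
if 0:
    zTestSol = np.array([1.e-4,0.,2.e-4,m_elec*eVrmsTran,0.,m_elec*eVrmsLong])
    zTestSolFin = np.dot(solenoid_eMatrix(B_mag,T_larm),zTestSol)
    print ('dx = %e cm, dy = %e cm (both ~ 0)' % \
           (zTestSolFin[Ix]-zTestSol[Ix],zTestSolFin[Iy]-zTestSol[Iy]))
    print ('dz = %e cm (expected %e cm)' % \
           (zTestSolFin[Iz]-zTestSol[Iz],eVrmsLong*T_larm))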
#
# Matrix to drag the particle through the drift during time interval 'deltaT':
#
def drift_Matrix(M_prtcl,deltaT):
driftMtrx = np.identity(6)
for i in (Ix,Iy,Iz):
driftMtrx[i,i+1]=deltaT/M_prtcl # sec/g
return driftMtrx
#
# Matrix to drag the electron in the "guiding center" system during time interval 'deltaT':
#
def guidingCenter_Matrix(deltaT):
gcMtrx = np.identity(6)
gcMtrx[Iz,Ipz]=deltaT/m_elec # sec/g
return gcMtrx
#
# Description of the collision during time interval 'deltaT'
# in the coordinate system of the "guiding center" of the electron;
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transferred momenta to ion and electron:
#
def guidingCenterCollision(vectrElec_gc,vectrIon,deltaT):
dpIon=np.zeros(3)
dpElec=np.zeros(3)
mOmegaLarm=m_elec*omega_L # g/sec
dpFactor_gc=q_elec**2 # g*cm^3/sec^2
rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # cm
sinOmega_gc=math.sin(vectrElec_gc[0])
cosOmega_gc=math.cos(vectrElec_gc[0])
x_gc=vectrElec_gc[3]/mOmegaLarm # cm
numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
(vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3/2) # cm^3
action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
b_gc=np.sqrt((vectrIon[0]-x_gc)**2+ \
(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm
# Dimensions of dpIon, dpElec are g*cm/sec:
dpIon[0]=-dpFactor_gc*deltaT*(vectrIon[0]-x_gc)/b_gc**3
dpIon[1]=-dpFactor_gc*deltaT*(vectrIon[2]-vectrElec_gc[2])/b_gc**3
dpIon[2]=-dpFactor_gc*deltaT*(vectrIon[4]-vectrElec_gc[4])/b_gc**3
dpElec[0]=-dpIon[0]
dpElec[1]=-dpIon[1]
dpElec[2]=-dpIon[2]
# print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
# (dpIon[0],dpIon[1],dpIon[2]))
return dpIon,dpElec,action,b_gc
#
# "Magnus expansion" description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transfered momenta to ion and electron and electron y_gc coordinate
# as well calculated parameters C1,C2,C3,b,D1,D2,q for testing:
#
def MagnusExpansionCollision(vectrElec_gc,vectrIon,deltaT):
# print ('Ion: x=%e, y=%e, z=%e' % (vectrIon[0],vectrIon[2],vectrIon[4]))
# print ('Electron: x=%e, y=%e, z=%e' %
# (vectrElec_gc[0],vectrElec_gc[4],vectrElec_gc[4]))
dpIon=np.zeros(3)
dpElec=np.zeros(3)
mOmegaLarm=m_elec*omega_L # g/sec
dpFactor_gc=q_elec**2 # g*cm^3/sec^2
rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # cm
sinOmega_gc=math.sin(vectrElec_gc[0])
cosOmega_gc=math.cos(vectrElec_gc[0])
x_gc=vectrElec_gc[3]/mOmegaLarm # cm
numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
(vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3./2.) # cm^3
action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
# C1=np.sqrt((vectrIon[0]-x_gc)**2+ \
# (vectrIon[2]-vectrElec_gc[2])**2+ \
# (vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm^2
C1=(vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm # cm^2
C2=2.*((vectrIon[0]-x_gc)*vectrIon[1]/M_ion+ \
(vectrIon[2]-vectrElec_gc[2])*vectrIon[3]/M_ion+ \
(vectrIon[4]-vectrElec_gc[4])* \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)) # cm^2/sec
C3=(vectrIon[1]/M_ion)**2+(vectrIon[3]/M_ion)**2+ \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)**2 # cm^2/sec^2
b=np.sqrt(C1+C2*deltaT+C3*deltaT**2) # cm
D1=(2.*C3*deltaT+C2)/b-C2/np.sqrt(C1) # cm/sec
D2=(C2*deltaT+2.*C1)/b-2.*np.sqrt(C1) # cm
q=4.*C1*C3-C2**2 # cm^4/sec^2
# Dimensions of dpIon, dpElec are g*cm/sec:
dpIon[0]=-2.*dpFactor_gc/q*((vectrIon[0]-x_gc)*D1-vectrIon[1]/M_ion*D2)
dpIon[1]=-2.*dpFactor_gc/q*((vectrIon[2]-vectrElec_gc[2])*D1- \
vectrIon[3]/M_ion*D2)
dpIon[2]=-2.*dpFactor_gc/q*((vectrIon[4]-vectrElec_gc[4])*D1- \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)*D2)
dpElec[0]=-dpIon[0]
dpElec[1]=-dpIon[1]
dpElec[2]=-dpIon[2]
dy_gc=dpIon[0]/mOmegaLarm # cm
# print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
# (dpIon[0],dpIon[1],dpIon[2]))
return dpIon,dpElec,action,dy_gc,C1,C2,C3,b,D1,D2,q
#
# Minimized functional (my own Least Squares Method - LSM;
# Python has own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
# Funcional = {log10(funcY) - [fitB*log10(argX) + fitA]}^2
#
def fitting(nPar1,nPar2,argX,funcY):
log10argX = np.zeros((nPar1,nPar2))
log10funcY = np.zeros((nPar1,nPar2))
for i in range(nPar2):
for n in range(nPar1):
log10argX[n,i] = np.log10(argX[n,i])
log10funcY[n,i] = np.log10(funcY[n,i])
sumArgX = np.zeros(nPar2)
sumArgX2 = np.zeros(nPar2)
sumFuncY = np.zeros(nPar2)
sumArgXfuncY= np.zeros(nPar2)
fitA = np.zeros(nPar2)
fitB = np.zeros(nPar2)
for i in range(nPar2):
for n in range(nPar1):
sumArgX[i] += log10argX[n,i]
sumArgX2[i] += log10argX[n,i]**2
sumFuncY[i] += log10funcY[n,i]
sumArgXfuncY[i] += log10argX[n,i]*log10funcY[n,i]
delta = sumArgX[i]**2-nPar1*sumArgX2[i]
fitA[i] = (sumArgX[i]*sumArgXfuncY[i]-sumArgX2[i]*sumFuncY[i])/delta
fitB[i] = (sumArgX[i]*sumFuncY[i]-nPar1*sumArgXfuncY[i])/delta
# print ('fitA(%d) = %e, fitB(%d) = %e' % (i,fitA[i],i,fitB[i]))
argXfit = np.zeros((nPar1,nPar2))
funcYfit = np.zeros((nPar1,nPar2))
funcHi2 = np.zeros(nPar2)
for i in range(nPar2):
factorA = math.pow(10.,fitA[i])
for n in range(nPar1):
argXfit[n,i] = math.pow(10.,log10argX[n,i])
funcYfit[n,i] = factorA*math.pow(argXfit[n,i],fitB[i])
funcHi2[i] += (np.log10(abs(funcY[n,i])) - np.log10(abs(funcYfit[n,i])))**2
return fitA,fitB,funcHi2,argXfit,funcYfit
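#
# Minimal illustration of 'fitting' on synthetic power-law data
# y = 10^fitA * x^fitB (guarded by 'if 0:'; the test arrays are hypothetical,
# not part of the simulation). For exact power laws the LSM must recover
# fitA and fitB exactly:
#
if 0:
    nTst1 = 20
    nTst2 = 3
    xTst = np.zeros((nTst1,nTst2))
    yTst = np.zeros((nTst1,nTst2))
    for iTst in range(nTst2):
        for nTst in range(nTst1):
            xTst[nTst,iTst] = math.pow(10.,-3.+.1*nTst)
            yTst[nTst,iTst] = 2.5*math.pow(xTst[nTst,iTst],-1.5+iTst)
    fitAtst,fitBtst,hi2tst,xFitTst,yFitTst = fitting(nTst1,nTst2,xTst,yTst)
    print ('fitA =',fitAtst,'(expected %8.5f in all columns)' % math.log10(2.5))
    print ('fitB =',fitBtst,'(expected -1.5, -0.5, 0.5)')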
#
# +-Errors for fitted parameters fitA and fitB:
#
def errFitAB(nPar1,nPar2,argX,funcY,fitA,fitB,funcHi2,errVar,errType):
log10argX = np.zeros((nPar1,nPar2))
log10funcY = np.zeros((nPar1,nPar2))
sumArgX = np.zeros(nPar2)
sumArgX2 = np.zeros(nPar2)
posErrFit = np.zeros(nPar2)
negErrFit = np.zeros(nPar2)
# return posErrFit,negErrFit
stepA = 5.e-4*mean(funcHi2)
stepB = 1.e-4*mean(funcHi2)
# print ('errFitAB: mean(funcHi2) = %e, stepA = %e, stepB = %e' % (mean(funcHi2),stepA,stepB))
for i in range(nPar2):
for n in range(nPar1):
log10argX[n,i] = np.log10(argX[n,i])
log10funcY[n,i] = np.log10(funcY[n,i])
sumArgX[i] += log10argX[n,i]
sumArgX2[i] += log10argX[n,i]**2
for i in range(nPar2):
k = 0
deltaFuncHi2 = 0.
while (deltaFuncHi2 < 1.):
k += 1
if k > 2000:
print ('Break in errFitAB (Fit funcY: case %d); positive error) for %d' % (errVar,i))
break
# print ('i=%d: fitParamtr = %e, funcHi2 = %e' % (i,fitParamtr[i], funcHi2[i]))
curFitA = fitA[i]
if (int(errVar) == 1):
curFitA = fitA[i] + k*stepA
curFuncHi2 = 0.
factorA = math.pow(10.,curFitA)
curFitB = fitB[i]
if (int(errVar) == 2):
curFitB = fitB[i] + k*stepB
curFuncHi2 = 0.
for n in range(nPar1):
curArgX = math.pow(10.,log10argX[n,i])
curFuncYfit = factorA*math.pow(curArgX,curFitB)
curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
deltaFuncHi2 = curFuncHi2 - funcHi2[i]
if (int(errVar) == 1):
posErrFit[i] = abs(curFitA - fitA[i])
else:
posErrFit[i] = abs(curFitB - fitB[i])
func1sigma2 = funcHi2[i]/(nPar2-3)
if (int(errVar) == 1):
fitSigma = np.sqrt(sumArgX2[i]/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
else:
fitSigma = np.sqrt(nPar2/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
if (int(errType) == 2):
posErrFit[i] = fitSigma
# if (int(errVar) == 1):
# print ('i=%d: fitA = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitA[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
# else:
# print ('i=%d: fitB = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitB[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
for i in range(nPar2):
k = 0
deltaFuncHi2 = 0.
while (deltaFuncHi2 < 1.):
k += 1
if k > 2000:
print ('Break in errFitAB (Fit funcY: case %d); negative error) for %d' % (errVar,i))
break
curFitA = fitA[i]
if (int(errVar) == 1):
curFitA = fitA[i] - k*stepA
factorA = math.pow(10.,curFitA)
curFitB = fitB[i]
if (int(errVar) == 2):
curFitB = fitB[i] - k*stepB
curFuncHi2 = 0.
for n in range(nPar1):
curArgX = math.pow(10.,log10argX[n,i])
curFuncYfit = factorA*math.pow(curArgX,curFitB)
curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
deltaFuncHi2 = curFuncHi2 - funcHi2[i]
if (int(errVar) == 1):
negErrFit[i] = abs(curFitA - fitA[i])
else:
negErrFit[i] = abs(curFitB - fitB[i])
if (int(errType) == 2):
negErrFit[i] = posErrFit[i]
# if (errVar == 1):
# print ('i=%d: fitA = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitA[i],posErrFit[i],funcHi2[i],k,curFuncHi2))
# else:
# print ('i=%d: fitB = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitB[i],negErrFit[i],funcHi2[i],k,curFuncHi2))
return posErrFit,negErrFit
def fittedGKintegration(xMin,xMax,fitA,fitB):
#
# "Gauss-Kronrod" method of integration (GK)
#
#
# Points (psi_i) and weights (w_i) to integrate for interval from -1 to 1;
# These data are from <NAME>. "Handbook of Mathematical Science".
# 5th Edition, CRC Press, Inc, 1978.
#
# To integrate over the interval from 0 to 1 it is necessary to replace the
# points psi_i with the points ksi_i=(1+psi_i)/2;
#
# For method with order N for function F(x):
# int_(-1)^1 = sum_1^N [w_i* F(psi_i)];
#
# In case of integration over interval from a to b:
# int_(a)^b = (b-a)/2 * sum_1^N [w_i* F(x_i)], where
# x_i = (b-a)*psi_i/2+(a+b)/2.
#
#----------------------------------------------------
#
# Data for GK:
#
#----------------------------------------------------
nPoints_GK = 16
psi_16=np.array([-0.9894009, -0.9445750, -0.8656312, -0.7554044, -0.6178762, \
-0.4580168, -0.2816036, -0.0950125, 0.0950125, 0.2816036, \
0.4580168, 0.6178762, 0.7554044, 0.8656312, 0.9445750, \
0.9894009])
w_16 =np.array([ 0.0271525, 0.0622535, 0.0951585, 0.1246290, 0.1495960, \
0.1691565, 0.1826034, 0.1894506, 0.1894506, 0.1826034, \
0.1691565, 0.1495960, 0.1246290, 0.0951585, 0.0622535, \
0.0271525])
y = np.zeros(nPoints_GK)
yIntegrated = 0.
for n in range(nPoints_GK):
xCrrnt = psi_16[n]*(xMax-xMin)/2 + (xMax+xMin)/2.
factorA = math.pow(10.,fitA)
y[n] = factorA*math.pow(xCrrnt,fitB)
yIntegrated += (xMax-xMin)*w_16[n]*y[n]*xCrrnt
return y,yIntegrated
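#
# Note: the sum above keeps the impact-parameter weight x inside the integrand
# and the overall factor (xMax-xMin) instead of (xMax-xMin)/2, so the routine
# returns 2*integral_xMin^xMax 10^fitA * x^(fitB+1) dx. A guarded numerical
# check against the analytic value (hypothetical test arguments):
#
if 0:
    yGK,intGK = fittedGKintegration(0.,1.,0.,1.) # integrand weight: x^1 * x = x^2
    print ('GK result = %e, analytic 2*int_0^1 x^2 dx = %e' % (intGK,2./3.))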
#------------------ End of defined functions -----------------------
#
#====================================================================
sphereNe=3.
R_e=math.pow(sphereNe/n_e,1./3) # cm
print ('R_e (cm)=%e' % R_e)
ro_Larm = eVrmsTran/omega_L # cm
print ('ro_Larm (cm)=%e' % ro_Larm)
impctPrmtrMin=2.*ro_Larm
# rhoDependenceFlag = 1 # skip calculation of rho dependence if = 0!
#============ Important flags ===========================
#
# Taking into account the transfer of momenta for both particles
# (for "classical" only):
dpTransferFlag = 1 # no taking into account if = 0!
#
saveFilesFlag = 0 # no saving if = 0!
#
plotFigureFlag = 1 # plotting blocks below are executed only if = 0!
#
#========================================================
nVion=50
Vion=np.zeros(nVion)
VionLong=np.zeros(nVion)
VionTrnsv=np.zeros(nVion)
VionRel=np.zeros(nVion)
vIonMin=4.e-3*eVrmsTran
vIonMax=10.*eVrmsTran
vIonMinRel=vIonMin/V0
vIonMaxRel=vIonMax/V0
print ('VionMin=%e (vIonMinRel=%e), vIonMax=%e (vIonMaxRel=%e)' % \
(vIonMin,vIonMinRel,vIonMax,vIonMaxRel))
vIonLogStep=math.log10(vIonMax/vIonMin)/(nVion-1)
R_debye=np.zeros(nVion)
R_pass=np.zeros(nVion)
R_pass_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
impctPrmtrMax=np.zeros(nVion)
impctPrmtrMax_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
for i in range(nVion):
crrntLogVionRel=math.log10(vIonMinRel)+i*vIonLogStep
VionRel[i]=math.pow(10.,crrntLogVionRel)
Vion[i]=VionRel[i]*V0
VionLong[i]=Vion[i]*np.cos(thetaVi)
VionTrnsv[i]=Vion[i]*np.sin(thetaVi)
R_debye[i]=np.sqrt(Vion[i]**2+eVrmsTran**2+eVrmsLong**2)/omega_p
R_pass[i]=np.sqrt(Vion[i]**2+eVrmsLong**2)*coolPassTime
R_pass_1[i]=np.sqrt(Vion[i]**2+0.*eVrmsLong**2)*coolPassTime
help=max(R_debye[i],R_e)
impctPrmtrMax[i]=min(help,R_pass[i])
impctPrmtrMax_1[i]=min(help,R_pass_1[i])
#-----------------------------------------------------------------
# Checking the correction of the maximal impact parameter depending
# on the preset number of minimal Larmor turns
#
larmorTurnsMin=[10,20,30,40]
impctPrmtrMaxCrrctd=np.zeros((nVion,4))
impctPrmtrMaxCrrctdRel=np.zeros((nVion,4))
for n in range (4):
for i in range(nVion):
impctPrmtrMaxCrrctd[i,n]=impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurnsMin[n]*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
impctPrmtrMaxCrrctdRel[i,n]=impctPrmtrMaxCrrctd[i,n]/impctPrmtrMax[i]
#
# First plotting:
#
if (plotFigureFlag == 0):
fig10 = plt.figure(10)
plt.semilogx(impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,0],'-r', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,1],'-b', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,2],'-g', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,3],'-m',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Maximal Impact parameter $R_{max}$, cm',color='m',fontsize=16)
plt.ylabel('$R_{max}^{Crrctd}/R_{Max}$',color='m',fontsize=16)
# plt.xlim([.9*min(impctPrmtrMax),1.1*max(impctPrmtrMax)])
plt.xlim([1.e-2,1.1*max(impctPrmtrMax)])
plt.ylim([.986,1.001])
titleHeader='$R_{max}^{Crrctd}=R_{Max} \cdot [1-(\pi\cdot N_{Larm} \cdot'
titleHeader += '\Delta_{e||}/(\omega_{Larm} \cdot R_{max})]^{1/2}$'
plt.title(titleHeader,color='m',fontsize=16)
plt.legend([('$N_{Larm}=$%2d' % larmorTurnsMin[0]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[1]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[2]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[3])],loc='lower center',fontsize=14)
if (saveFilesFlag == 1):
fig10.savefig('picturesCMA/correctedRmax_fig10cma.png')
print ('File "picturesCMA/correctedRmax_fig10cma.png" is written')
xLimit=[.9*VionRel[0],1.1*VionRel[nVion-1]]
#
# Types of collisions:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'Types of Collisions: $V_{e0}=%4.2f\cdot10^{%2d}$ cm/s, $B=%6.1f$ Gs'
plt.title(titleHeader % (mantV0,powV0,fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[8.e-4,.6]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(4.4e-5,.0018,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.e-4,1.75e-3,'$R_{min}=2\cdot<rho_\perp>$',color='k',fontsize=16)
plt.text(7.e-4,5.e-2,'$R_{max}$',color='k',fontsize=16)
plt.text(2.85e-5,3.3e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(1.e-4,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
plt.text(1.e-4,10.e-4,'Adiabatic or Fast Collisions',color='r',fontsize=20)
plt.text(2.25e-5,.275,'Collisions are Screened',color='r',fontsize=20)
plt.text(1.6e-5,1.e-3,'$ \cong 20\cdot R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
#
# Picture for HESR:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'HESR Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[8.e-4,.6]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(4.4e-4,8.4e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(1.e-4,8.4e-4,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.7e-6,3.4e-3,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
plt.text(2.8e-4,.1,'$R_{max}$',color='k',fontsize=16)
plt.text(1.e-4,1.8e-2,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(6.8e-5,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
plt.text(2.3e-5,1.95e-3,'Adiabatic or Fast Collisions',color='r',fontsize=20)
plt.text(2.e-5,.275,'Screened Collisions',color='r',fontsize=20)
plt.text(3.58e-6,2.05e-3,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
# fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
# print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
fig3151.savefig('HESRimpctPrmtr_fig3151cma.png')
print ('File "HESRimpctPrmtr_fig3151cma.png" is written')
#
# Picture for EIC:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'EIC Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[5.e-5,.3]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(9.e-4,4.e-5,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(1.7e-4,3.e-5,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(6.3e-6,1.1e-4,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
plt.text(1.e-4,2.1e-2,'$R_{max}$',color='k',fontsize=16)
plt.text(2.57e-5,5.e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(2.3e-5,1.e-3,'Magnetized Collisions',color='r',fontsize=20)
# plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
plt.text(1.1e-5,5.7e-5,'Weak or Adiabatic or Fast Collisions',color='r',fontsize=16)
plt.text(2.e-5,.15,'Screened Collisions',color='r',fontsize=20)
plt.text(2.5e-3,1.7e-4,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
# fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
# print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
fig3151.savefig('EICimpctPrmtr_fig3151cma.png')
print ('File "EICimpctPrmtr_fig3151cma.png" is written')
# plt.show()
# sys.exit()
#
# Magnetized collisions:
#
if (plotFigureFlag == 0):
fig209=plt.figure (209)
plt.loglog(VionRel,R_debye,'-r',VionRel,R_pass,'-b', \
VionRel,R_pass_1,'--b',linewidth=2)
plt.grid(True)
hold=True
plt.plot([VionRel[0],VionRel[nVion-1]],[R_e,R_e],color='m',linewidth=2)
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=16)
plt.ylabel('$R_{Debye}$, $R_{Pass}$, $R_e$, cm',color='m',fontsize=16)
# titleHeader='Magnetized Collision: $R_{Debye}$, $R_{Pass}$, $R_e$: $V_{e0}=%5.3f\cdot10^{%2d}$cm/s'
# plt.title(titleHeader % (mantV0,powV0),color='m',fontsize=16)
plt.title('Magnetized Collisions: $R_{Debye}$, $R_{Pass}$, $R_e$',color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[1.e-3,10.]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.5e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(4.4e-5,0.001175,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.e-5,2.45e-3,'$R_e$',color='k',fontsize=16)
plt.text(3.e-5,5.e-2,'$R_{Debye}$',color='k',fontsize=16)
plt.text(3.e-5,1.8e-2,'$R_{Pass}$',color='k',fontsize=16)
plt.text(4.5e-5,4.8e-3,'$R_{Pass}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.text(8.3e-5,4.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
if (saveFilesFlag == 1):
fig209.savefig('picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png')
print ('File "picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png" is written')
#
# Coulomb logarithm evaluation:
#
clmbLog = np.zeros(nVion)
for i in range(nVion):
clmbLog[i] = math.log(impctPrmtrMax[i]/impctPrmtrMin)
# clmbLog[i] = math.log(impctPrmtrMax_1[i]/impctPrmtrMin)
if (plotFigureFlag == 0):
fig3155=plt.figure (3155)
plt.semilogx(VionRel,clmbLog,'-xr',linewidth=2)
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Coulomb Logarithm $L_c$',color='m',fontsize=14)
plt.title('Coulomb Logarithm: $L_c$ = $ln(R_{max}/R_{min})$',color='m',fontsize=16)
yLimit=[min(clmbLog)-.1,max(clmbLog)+.1]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(3.4e-5,5.,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3155.savefig('picturesCMA_v7/coulombLogrthm_fig3155cma.png')
print ('File "picturesCMA_v7/coulombLogrthm_fig3155cma.png" is written')
#
# matrix for electron with .5*timeStep_c:
#
matr_elec_c=guidingCenter_Matrix(.5*timeStep_c)
#
# matrix for ion with mass M_ion and .5*timeStep_c:
#
matr_ion_c=drift_Matrix(M_ion,.5*timeStep_c)
larmorTurns = 10
nImpctPrmtr = 50
rhoMin = impctPrmtrMin
rhoMax = np.zeros(nVion)
log10rhoMin = math.log10(rhoMin)
crrntImpctPrmtr = np.zeros(nImpctPrmtr)
halfLintr = np.zeros((nImpctPrmtr,nVion))
pointAlongTrack = np.zeros((nImpctPrmtr,nVion))
totalPoints = 0
for i in range(nVion):
rhoMax[i] = impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
rhoMax[i] = impctPrmtrMax[i]
# rhoMax[i] = impctPrmtrMax_1[i] # for checking!
# print ('rhoMax(%d) = %e' % (i,rhoMax[i]))
log10rhoMax = math.log10(rhoMax[i])
log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
# print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
for n in range(nImpctPrmtr):
log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
rhoCrrnt = math.pow(10.,log10rhoCrrnt)
# print (' rhoCrrnt(%d) = %e' % (n,rhoCrrnt))
halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2) # half length of interaction; cm
timeHalfPath = halfLintr[n,i]/eVrmsLong # 0.5 time of interaction; sec
numbLarmor = int(2.*timeHalfPath/T_larm)
pointAlongTrack[n,i] = int(2.*timeHalfPath/timeStep_c)
totalPoints += pointAlongTrack[n,i]
# print (' %d: rhoCrrnt = %e, numbLarmor = %d, pointAlongTrack = %d' % \
# (n,rhoCrrnt,numbLarmor,pointAlongTrack[n,i]))
# print ('totalPoints = %d' % totalPoints)
totalPoints = int(totalPoints)
nnTotalPoints=np.arange(0,2*totalPoints-1,1)
arrayA=np.zeros(2*totalPoints)
arrayB=np.zeros(2*totalPoints)
bCrrnt_c = np.zeros(2*totalPoints)
#
# Variables for different testing:
#
b_gc = np.zeros(totalPoints)
action_gc = np.zeros(totalPoints)
C1test = np.zeros(totalPoints)
C2test = np.zeros(totalPoints)
C3test = np.zeros(totalPoints)
b_ME = np.zeros(totalPoints)
D1test = np.zeros(totalPoints)
D2test = np.zeros(totalPoints)
qTest = np.zeros(totalPoints)
action_ME = np.zeros(totalPoints)
actn_gc_ME_rel = np.zeros(totalPoints)
indxTest = 0
rhoInit = np.zeros((nImpctPrmtr,nVion))
#
# "Classical" approach:
#
deltaPx_c = np.zeros((nImpctPrmtr,nVion))
deltaPy_c = np.zeros((nImpctPrmtr,nVion))
deltaPz_c = np.zeros((nImpctPrmtr,nVion))
ionVx_c = np.zeros((nImpctPrmtr,nVion))
ionVy_c = np.zeros((nImpctPrmtr,nVion))
ionVz_c = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_c = np.zeros((nImpctPrmtr,nVion))
#
# "Magnus Expand" approach:
#
deltaPx_m = np.zeros((nImpctPrmtr,nVion))
deltaPy_m = np.zeros((nImpctPrmtr,nVion))
deltaPz_m = np.zeros((nImpctPrmtr,nVion))
ionVx_m = np.zeros((nImpctPrmtr,nVion))
ionVy_m = np.zeros((nImpctPrmtr,nVion))
ionVz_m = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_m = np.zeros((nImpctPrmtr,nVion))
#
# Comparison of approaches (ratio deltaEnrgIon_c/deltaEnrgIon_m):
#
deltaPx_c_m = np.zeros((nImpctPrmtr,nVion))
deltaPy_c_m = np.zeros((nImpctPrmtr,nVion))
deltaPz_c_m = np.zeros((nImpctPrmtr,nVion))
dEion_c_m = np.zeros((nImpctPrmtr,nVion))
#
# Factor to calculate transferred energy to ion
# (the friction force is defined by this transferred energy):
#
deFactor = 0.5/M_ion # 1/g
frctnForce_cSM = np.zeros(nVion) # integration, using Simpson method
frctnForce_mSM = np.zeros(nVion) # integration, using Simpson method
numberWrongSign_c=0
numberWrongSign_m=0
posSignDeltaEnrgIon_c=0
negSignDeltaEnrgIon_c=0
posSignDeltaEnrgIon_m=0
negSignDeltaEnrgIon_m=0
timeRun = np.zeros(nVion)
totalTimeRun = 0.
indx = 0
# ----------------- Main simulation ---------------
#
for i in range(nVion):
# Taking into account the correction of the maximal impact parameter
# depending on the preset number of minimal Larmor turns:
rhoMax[i] = impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
# Without taking into account the correction of the maximal impact parameter
# depending on the preset number of minimal Larmor turns:
rhoMax[i] = impctPrmtrMax[i]
# rhoMax[i] = impctPrmtrMax_1[i] # for checking!
log10rhoMax = math.log10(rhoMax[i])
log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
# print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
timeStart=os.times()
for n in range(nImpctPrmtr):
log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
rhoCrrnt = math.pow(10.,log10rhoCrrnt)
# rhoInit[i*nImpctPrmtr+n] = rhoCrrnt
rhoInit[n,i] = rhoCrrnt
halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2) # half length of interaction; cm
z_ionCrrnt_c = np.zeros(6) # Zeroing out of vector for ion ("GC"-approach)
z_elecCrrnt_c = np.zeros(6) # Zeroing out of vector for electron ("GC"-approach)
z_ionCrrnt_m = np.zeros(6) # Zeroing out of vector for ion ("ME"-approach)
z_elecCrrnt_m = np.zeros(6) # Zeroing out of vector for electron ("ME"-approach)
# Zeroing out of "guiding center" vector for electron (both approaches):
z_elecCrrnt_gc_c = np.zeros(6)
z_elecCrrnt_gc_m = np.zeros(6)
        # Current values of transferred momenta
        # (second index denotes the "Guiding Center" (if 0) and
        # "Magnus Expansion" (if 1) approaches):
dpCrrnt = np.zeros((3,2))
# Intermediate arrays:
dpIon_c = np.zeros(3)
dpIon_m = np.zeros(3)
dpElec_c = np.zeros(3)
dpElec_m = np.zeros(3)
# Current initial vector for electron:
z_elecCrrnt_c[Ix] = rhoCrrnt # x, cm
z_elecCrrnt_c[Iz] = -halfLintr[n,i] # z, cm
z_elecCrrnt_c[Ipy] = m_elec*eVrmsTran # py, g*cm/sec
z_elecCrrnt_c[Ipz] = m_elec*eVrmsLong # pz, g*cm/sec
z_elecCrrnt_m[Ix] = rhoCrrnt # x, cm
z_elecCrrnt_m[Iz] = -halfLintr[n,i] # z, cm
z_elecCrrnt_m[Ipy] = m_elec*eVrmsTran # py, g*cm/sec
z_elecCrrnt_m[Ipz] = m_elec*eVrmsLong # pz, g*cm/sec
# Current initial vector for ion velocity for both approaches:
ionVx_c[n,i] = VionTrnsv[i]*np.cos(phiVi)
ionVy_c[n,i] = VionTrnsv[i]*np.sin(phiVi)
ionVz_c[n,i] = VionLong[i]
ionVx_m[n,i] = VionTrnsv[i]*np.cos(phiVi)
ionVy_m[n,i] = VionTrnsv[i]*np.sin(phiVi)
ionVz_m[n,i] = VionLong[i]
# transfer to system of guiding center:
z_elecCrrnt_gc_c=toGuidingCenter(z_elecCrrnt_c)
z_elecCrrnt_gc_m=toGuidingCenter(z_elecCrrnt_m)
#
        # Main loop along each track:
#
for k in range(int(pointAlongTrack[n,i])):
#
# Dragging both particles through first half of the step of the track:
#
z_elecCrrnt_gc_c = np.dot(matr_elec_c,z_elecCrrnt_gc_c) # electron
z_elecCrrnt_gc_m = np.dot(matr_elec_c,z_elecCrrnt_gc_m) # electron
z_ionCrrnt_c = np.dot(matr_ion_c,z_ionCrrnt_c) # ion
z_ionCrrnt_m = np.dot(matr_ion_c,z_ionCrrnt_m) # ion
# transfer from system of guiding center:
z_elecCrrnt_c=fromGuidingCenter(z_elecCrrnt_gc_c)
z_elecCrrnt_m=fromGuidingCenter(z_elecCrrnt_gc_m)
# Current distance between ion and electron; cm:
bCrrnt_c[indx]=np.sqrt((z_ionCrrnt_c[0]-z_elecCrrnt_c[0])**2+ \
(z_ionCrrnt_c[2]-z_elecCrrnt_c[2])**2+ \
(z_ionCrrnt_c[4]-z_elecCrrnt_c[4])**2)
# Current values of parameters A,B:
arrayA[indx] = math.log10(ro_Larm/bCrrnt_c[indx])
arrayB[indx] = math.log10((q_elec**2/bCrrnt_c[indx])/kinEnergy)
indx += 1
#
# Dragging both particles through interaction during this step of track
# (for both approaches):
#
# "Guiding Center":
dpIon_c,dpElec_c,action,b_gc_c = \
guidingCenterCollision(z_elecCrrnt_gc_c,z_ionCrrnt_c,timeStep_c)
# "Magnus Expantion":
dpIon_m,dpElec_m,actionME,dy_gc_m,C1,C2,C3,b,D1,D2,q = \
MagnusExpansionCollision(z_elecCrrnt_gc_m,z_ionCrrnt_m,timeStep_c)
# Save data for testing:
b_gc[indxTest] = b_gc_c # "Guiding Center" approach
action_gc[indxTest] = action # -"- -"- -"- -"- -"- -"-
C1test[indxTest] = C1 # "Magnus expansion" approach
C2test[indxTest] = abs(C2) # -"- -"- -"- -"- -"- -"-
C3test[indxTest] = C3 # -"- -"- -"- -"- -"- -"-
b_ME[indxTest] = b # -"- -"- -"- -"- -"- -"-
D1test[indxTest] = D1 # -"- -"- -"- -"- -"- -"-
D2test[indxTest] = D2 # -"- -"- -"- -"- -"- -"-
qTest[indxTest] = q #-"- -"- -"- -"- -"- -"-
action_ME[indxTest] = actionME #-"- -"- -"- -"- -"- -"-
indxTest += 1
indxTestMax = indxTest
#
# Taking into account transfer of momentum for both particles:
#
if (dpTransferFlag == 1):
for ic in range(3):
z_ionCrrnt_c[2*ic+1] += dpIon_c[ic]
z_elecCrrnt_c[2*ic+1] += dpElec_c[ic]
z_ionCrrnt_m[2*ic+1] += dpIon_m[ic]
z_elecCrrnt_m[2*ic+1] += dpElec_m[ic]
# transfer to system of guiding center:
z_elecCrrnt_gc_c=toGuidingCenter(z_elecCrrnt_c)
z_elecCrrnt_gc_m=toGuidingCenter(z_elecCrrnt_m)
            # Accumulation of the transferred momenta to ion along the track for both approaches:
for ic in range(3):
# if i == 0:
# print ('dpIon_c[%2d] = %20.14e, dpIon_m[%2d] = %20.14e' % \
# (ic,dpIon_c[ic],ic,dpIon_m[ic]))
dpCrrnt[ic,0] += dpIon_c[ic] # "Guiding Center", g*cm/sec
dpCrrnt[ic,1] += dpIon_m[ic] # "Magnus Expansion", g*cm/sec
#
            # Ion's velocity change along the track - both approaches:
#
ionVx_c[n,i] += dpCrrnt[0,0]/M_ion # cm/sec
ionVy_c[n,i] += dpCrrnt[1,0]/M_ion # cm/sec
ionVz_c[n,i] += dpCrrnt[2,0]/M_ion # cm/sec
ionVx_m[n,i] += dpCrrnt[0,1]/M_ion # cm/sec
ionVy_m[n,i] += dpCrrnt[1,1]/M_ion # cm/sec
ionVz_m[n,i] += dpCrrnt[2,1]/M_ion # cm/sec
#
# Dragging both particles through second half of the step of the track:
#
z_elecCrrnt_gc_c = np.dot(matr_elec_c,z_elecCrrnt_gc_c) # electron
z_ionCrrnt_c = np.dot(matr_ion_c,z_ionCrrnt_c) # ion
z_elecCrrnt_gc_m = np.dot(matr_elec_c,z_elecCrrnt_gc_m) # electron
z_ionCrrnt_m = np.dot(matr_ion_c,z_ionCrrnt_m) # ion
# transfer from system of guiding center:
z_elecCrrnt_c=fromGuidingCenter(z_elecCrrnt_gc_c)
z_elecCrrnt_m=fromGuidingCenter(z_elecCrrnt_gc_m)
# Current distance between ion and electron; cm:
bCrrnt_c[indx]=np.sqrt((z_ionCrrnt_c[0]-z_elecCrrnt_c[0])**2+ \
(z_ionCrrnt_c[2]-z_elecCrrnt_c[2])**2+ \
(z_ionCrrnt_c[4]-z_elecCrrnt_c[4])**2)
# Current values of parameters A,B:
arrayA[indx] = math.log10(ro_Larm/bCrrnt_c[indx])
arrayB[indx] = math.log10((q_elec**2/bCrrnt_c[indx])/kinEnergy)
indx += 1
#
# Transferred momenta along the track - "Guiding Center" approach:
#
deltaPx_c[n,i] = dpCrrnt[0,0] # dpx, g*cm/sec
# if deltaPx_c[n,i] <= 0.:
# print ('deltaPx_c[%2d,%2d] = %e, dpCrrnt[%2d,%2d] = %e' % \
# (n,i,deltaPx_c[n,i],n,i,dpCrrnt[0,0]))
deltaPy_c[n,i] = dpCrrnt[1,0] # dpy, g*cm/sec
# if deltaPy_c[n,i] <= 0.:
# print ('deltaPy_c[%2d,%2d] = %e' % (n,i,deltaPy_c[n,i]))
deltaPz_c[n,i] = dpCrrnt[2,0] # dpz, g*cm/sec
# if deltaPz_c[n,i] <= 0.:
# print ('deltaPz_c[%2d,%2d] = %e' % (n,i,deltaPz_c[n,i]))
# Incorrect value:
# deltaEnrgIon_c[n,i] = (dpCrrnt[0,0]**2+dpCrrnt[1,0]**2+dpCrrnt[2,0]**2)* \
# deFactor/eVtoErg # eV
# Correct value:
crrntDeltaEnrg = (dpCrrnt[0,0]*ionVx_c[n,i]+ \
dpCrrnt[1,0]*ionVy_c[n,i]+ \
dpCrrnt[2,0]*ionVz_c[n,i])*deFactor/eVtoErg # eV
absDeltaEnrgIon_c = abs(crrntDeltaEnrg)
if (crrntDeltaEnrg != 0.):
signDeltaEnrgIon_c = crrntDeltaEnrg/abs(crrntDeltaEnrg)
deltaEnrgIon_c[n,i] = crrntDeltaEnrg
if (deltaEnrgIon_c[n,i] > 0.):
posSignDeltaEnrgIon_c += 1
else:
negSignDeltaEnrgIon_c += 1
#
# Transferred momenta along the track - "Magnus expansion" approach:
#
deltaPx_m[n,i] = dpCrrnt[0,1] # dpx, g*cm/sec
# if deltaPx_m[n,i] <= 0.:
# print ('deltaPx_m[%2d,%2d] = %e' % (n,i,deltaPx_m[n,i]))
deltaPy_m[n,i] = dpCrrnt[1,1]
# if deltaPy_m[n,i] <= 0.:
# print ('deltaPy_m[%2d,%2d] = %e' % (n,i,deltaPy_m[n,i]))
deltaPz_m[n,i] = dpCrrnt[2,1]
# if deltaPz_m[n,i] <= 0.:
# print ('deltaPz_m[%2d,%2d] = %e' % (n,i,deltaPz_m[n,i]))
# Incorrect value:
# deltaEnrgIon_m[n,i] = (dpCrrnt[0,1]**2+dpCrrnt[1,1]**2+dpCrrnt[2,1]**2)* \
# deFactor/eVtoErg # eV
        # Correct value (absolute value):
crrntDeltaEnrg = (dpCrrnt[0,1]*ionVx_m[n,i]+ \
dpCrrnt[1,1]*ionVy_m[n,i]+ \
dpCrrnt[2,1]*ionVz_m[n,i])*deFactor/eVtoErg # eV
absDeltaEnrgIon_m = abs(crrntDeltaEnrg)
if (crrntDeltaEnrg != 0.):
signDeltaEnrgIon_m = crrntDeltaEnrg/abs(crrntDeltaEnrg)
deltaEnrgIon_m[n,i] = crrntDeltaEnrg
if (deltaEnrgIon_m[n,i] > 0.):
posSignDeltaEnrgIon_m += 1
else:
negSignDeltaEnrgIon_m += 1
#
# Comparison of the approaches (%):
#
if (deltaPx_m[n,i] != 0.):
deltaPx_c_m[n,i] = 100.*(deltaPx_c[n,i]/deltaPx_m[n,i]-1.)
else:
            print ('Bad value (=0.) of deltaPx_m[%d,%d]' % (n,i))
if (deltaPy_m[n,i] != 0.):
deltaPy_c_m[n,i] = 100.*(deltaPy_c[n,i]/deltaPy_m[n,i]-1.)
else:
            print ('Bad value (=0.) of deltaPy_m[%d,%d]' % (n,i))
if (deltaPz_m[n,i] != 0.):
deltaPz_c_m[n,i] = 100.*(deltaPz_c[n,i]/deltaPz_m[n,i]-1.)
else:
            print ('Bad value (=0.) of deltaPz_m[%d,%d]' % (n,i))
if (deltaEnrgIon_m[n,i] != 0.):
dEion_c_m[n,i] = 100.*(deltaEnrgIon_c[n,i]/deltaEnrgIon_m[n,i]-1.)
else:
            print ('Bad value (=0.) of deltaEnrgIon_m[%d,%d]' % (n,i))
#
        # Integration over the impact parameter (trapezoid-type rule):
#
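        # Each term below is n_e * mean(dE_n, dE_{n-1}) * pi * (rho_n^2 - rho_{n-1}^2):
        # the energy transfer averaged over an annulus of impact parameter,
        # accumulated into the friction-force integral (the factor 100 converts eV/cm to eV/m).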
if (n > 0):
frctnForce_cSM[i] += pi*n_e*100.*(deltaEnrgIon_c[n,i]+deltaEnrgIon_c[n-1,i])* \
.5*(rhoInit[n,i]+rhoInit[n-1,i])* \
(rhoInit[n,i]-rhoInit[n-1,i]) # eV/m
frctnForce_mSM[i] += pi*n_e*100.*(deltaEnrgIon_m[n,i]+deltaEnrgIon_m[n-1,i])* \
.5*(rhoInit[n,i]+rhoInit[n-1,i])* \
(rhoInit[n,i]-rhoInit[n-1,i]) # eV/m
timeEnd = os.times()
timeRun[i] = float(timeEnd[0])-float(timeStart[0]) # CPU time , sec
totalTimeRun += timeRun[i]
print ('timeRun(%2d) = %6.3f seconds' % (i,timeRun[i]))
print ('Total time (including integration) = %6.3f seconds' % totalTimeRun)
print ('deltaEnrgIon_c: nPos=%d, nNeg=%d; deltaEnrgIon_m: nPos=%d, nNeg=%d' % \
(posSignDeltaEnrgIon_c,negSignDeltaEnrgIon_c, \
posSignDeltaEnrgIon_m,negSignDeltaEnrgIon_m))
#
# Output for checking:
#
# print \
# ('n Px_c Px_m Py_c Py_m Pz_c Pz_m Pz_c_m')
# for i in range(10,11,1):
# for n in range(nImpctPrmtr):
# print ('%d: %e %e %e %e %e %e %e' % \
# (n,deltaPx_c[n,i],deltaPx_m[n,i],deltaPy_c[n,i], \
# deltaPy_m[n,i],deltaPz_c[n,i],deltaPz_m[n,i],deltaPz_c_m[n,i]))
# print ('n dEion_c dEion_m')
# for i in range(10,11,1):
# for n in range(nImpctPrmtr):
# print ('%d: %e %e ' % (n,deltaEnrgIon_c[n,i],deltaEnrgIon_m[n,i]))
# print ('indxTestMax = %d' % indxTestMax)
#
# Plotting of the tests:
#
nn=np.arange(0,indxTestMax-1,1)
#
# C1:
#
if (plotFigureFlag == 0):
fig2020=plt.figure (2020)
plt.plot(nn,C1test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$C1$, $cm^2$',color='m',fontsize=16)
plt.title('$C1=[x_{gc}^2+y_{gc}^2+z_e^2+2J/(m_e \cdot \Omega_e)]^{0.5}$', \
color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2020.savefig('picturesCMA_v7/magnusExpansion_C1_fig2020cma.png')
print ('File "picturesCMA_v7/magnusExpansion_C1_fig2020cma.png" is written')
#
# C2:
#
if (plotFigureFlag == 0):
fig2030=plt.figure (2030)
plt.plot(nn,1.e-5*C2test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$C2$, $\cdot 10^5$ $cm^2/s$',color='m',fontsize=16)
plt.title('$C2=2\cdot[V_{ix}\cdot(x_i-x_{gc})+V_{iy}\cdot(y_i-y_{gc})+(V_{iz}-V_{ez})\cdot(z_i-z_e)]$', \
color='m',fontsize=14)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2030.savefig('picturesCMA_v7/magnusExpansion_C2_fig2030cma.png')
print ('File "picturesCMA_v7/magnusExpansion_C2_fig2030cma.png" is written')
#
# C3:
#
if (plotFigureFlag == 0):
fig2040=plt.figure (2040)
plt.plot(nn,1e-11*C3test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$C3$, $\cdot 10^{11}$ $cm^2/s^2$',color='m',fontsize=16)
plt.title('$C3=V_{ix}^2+V_{iy}^2+(V_{iz}-V_{ez})^2$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2040.savefig('picturesCMA_v7/magnusExpansion_C3_fig2040cma.png')
print ('File "picturesCMA_v7/magnusExpansion_C3_fig2040cma.png" is written')
#
# D1:
#
if (plotFigureFlag == 0):
fig2025=plt.figure (2025)
plt.plot(nn,1.e-5*D1test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$10^{-5}\cdot D1$, $cm/s$',color='m',fontsize=16)
plt.title('$D1=(2C_3\cdot \Delta t+C_2)/b_{ME}$ $-$ $C_2/C_1^{0.5}$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2025.savefig('picturesCMA_v7/magnusExpansion_D1_fig2025cma.png')
print ('File "picturesCMA_v7/magnusExpansion_D1_fig2025cma.png" is written')
#
# D2:
#
if (plotFigureFlag == 0):
fig2035=plt.figure (2035)
plt.plot(nn,1.e4*D2test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$10^4\cdot D2$, $cm$',color='m',fontsize=16)
plt.title('$D2=(2C_1+C_2\cdot \Delta t)/b_{ME}$ $-$ $2C_1^{0.5}$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2035.savefig('picturesCMA_v7/magnusExpansion_D2_fig2035cma.png')
print ('File "picturesCMA_v7/magnusExpansion_D2_fig2035cma.png" is written')
#
# Distance b_ME between particles for "ME" approach:
#
if (plotFigureFlag == 0):
fig2050=plt.figure (2050)
plt.plot(nn,b_ME[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$b_{ME}$, $cm$',color='m',fontsize=16)
plt.title('Distance $b_{ME}$ between Particles for "ME" Approach', color='m',fontsize=16)
plt.text(3500,.4,'$b_{ME}=[C1+C2\cdot \Delta t +C3 \cdot \Delta t^2]^{0.5}$', \
color='m',fontsize=16)
plt.text(33000,.36,('$(\Delta t=%8.2e$ $s)$' % timeStep_c),color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2050.savefig('picturesCMA_v7/particleDistance_me_fig2050cma.png')
print ('File "picturesCMA_v7/particleDistance_me_fig2050cma.png" is written')
#
# Distance b_gc between particles for "GC" approach:
#
if (plotFigureFlag == 0):
fig2055=plt.figure (2055)
plt.plot(nn,b_gc[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$b_{GC}$, $cm$',color='m',fontsize=16)
plt.title('Distance $b_{GC}$ between Particles for "GC" Approach', color='m',fontsize=16)
plt.text(0,.4,'$b_{GC}=[(x_i-x_{gc})^2+(y_i-y_{gc})^2+$',color='m',fontsize=16)
plt.text(55500,.36,'$+(z_i-z_e)^2+2J/(m_e \cdot \Omega_e)]^{0.5}$', \
color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2055.savefig('picturesCMA/particleDistance_gc_fig2055cma.png')
print ('File "picturesCMA/particleDistance_gc_fig2055cma.png" is written')
#
# Comparison of bCrrnt_c from "Guiding Center" with bTest from
# "Magnus expansion" approaches:
#
bCrrnt_cTest = np.zeros(indxTestMax)
bCrrnt_cTestRel = np.zeros(indxTestMax)
b_gc_ME_rel = np.zeros(indxTestMax)
for k in range(indxTestMax):
bCrrnt_cTest[k] = .5*(bCrrnt_c[2*k]+bCrrnt_c[2*k+1])
# bCrrnt_cTestRel[k] = bCrrnt_cTest[k]/b_ME[k]
b_gc_ME_rel[k] = b_gc[k]/b_ME[k]
actn_gc_ME_rel[k] = 1.e7*(action_gc[k]/action_ME[k]-1.)
if (plotFigureFlag == 0):
fig2060=plt.figure (2060)
# plt.semilogy(nn,bCrrnt_cTest[0:indxTestMax-1],'.r')
plt.plot(nn,bCrrnt_cTest[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('Test $b_{crrntTest}$, $cm$',color='m',fontsize=16)
plt.title('Test $b_{crrntTest} = .5 \cdot [b_{crrnt}(k)+b_{crrnt}(k+1)]$',color='m', \
fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
# plt.ylim([.9*min(bCrrnt_cTest),1.1*max(bCrrnt_cTest)])
plt.grid(True)
#
# Ratio b_gc/b_ME (absolute value):
#
if (plotFigureFlag == 0):
fig2070=plt.figure (2070)
# plt.semilogy(nn,b_gc_ME_rel[0:indxTestMax-1],'.r')
plt.plot(nn,b_gc_ME_rel[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$b_{GC}/b_{ME}$',color='m',fontsize=16)
plt.title('Comparison of Distances $b_{GC}$ and $b_{ME}$ between Particles',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
# plt.ylim([.9*min(b_gc_ME_rel),1.1*max(b_gc_ME_rel)])
plt.grid(True)
if (saveFilesFlag == 1):
fig2070.savefig('picturesCMA_v7/particleDistanceComprsn_gc_me_fig2070cma.png')
print ('File "picturesCMA_v7/particleDistanceComprsn_gc_me_fig2070cma.png" is written')
#
# Relative difference of actions J_gc and J_ME:
#
if (plotFigureFlag == 0):
fig2080=plt.figure (2080)
# plt.semilogy(nn,actn_gc_ME_rel[0:indxTestMax-1],'.r')
plt.plot(nn,actn_gc_ME_rel[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$10^7\cdot (J_{GC}/J_{ME}$ $-$ $1)$',color='m',fontsize=16)
plt.title('Comparison of Actions $J_{GC}$ and $J_{ME}$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.ylim([.99*min(actn_gc_ME_rel),1.01*max(actn_gc_ME_rel)])
plt.grid(True)
if (saveFilesFlag == 1):
fig2080.savefig('picturesCMA_v7/actionComprsn_gc_me_fig2080cma.png')
print ('File "picturesCMA_v7/actionComprsn_gc_me_fig2080cma.png" is written')
#
# Total length of interaction (1/2 of value):
#
nn=np.arange(0,nVion*nImpctPrmtr,1)
halfLintrTest = np.zeros(nVion*nImpctPrmtr)
for i in range(nVion):
for n in range(nImpctPrmtr):
        halfLintrTest[nImpctPrmtr*i+n] = halfLintr[n,i]
if (plotFigureFlag == 0):
fig2090=plt.figure (2090)
plt.semilogy(nn,halfLintrTest,'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$0.5 \cdot L_{Intrctn}$, $cm$',color='m',fontsize=16)
    plt.title('Total Length of Interaction: $L_{Intrctn}=2 \cdot [R_{max}^2-\rho_{Init}^2]^{0.5}$', \
color='m',fontsize=16)
plt.xlim([-100,nVion*nImpctPrmtr+100])
plt.ylim([.9*min(halfLintrTest),1.1*max(halfLintrTest)])
plt.grid(True)
if (saveFilesFlag == 1):
fig2090.savefig('picturesCMA/totalLengthIntrsctn_fig2090cma.png')
print ('File "picturesCMA/totalLengthIntrsctn_fig2090cma.png" is written')
#===================================================
#
# Fitting of the correct values of deltaEnrgIon_m
#
#===================================================
#
# Fitting for figures with deltaEnrgIon_m (my own Least Squares Method - LSM;
# Python has its own routine for LSM - see
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
#
# Fitted function:
#
# |deltaEnrgIon| = 10^fitA * rho^fitB,
# so that
#
# log10(|deltaEnrgIon|) = fitB*log10(rho) + fitA
#
# So, the dimension of expression (10^fitA * rho^fitB) is the same
# as deltaEnrgIon, i.e. eV
#
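#
# Illustrative sketch (not part of the original script): in log-log space this
# power-law fit is an ordinary linear least-squares problem, so for one ion
# velocity column i it could be cross-checked against numpy's polyfit:
#
#   logRho = np.log10(rhoInit[:, i])
#   logE   = np.log10(np.abs(deltaEnrgIon_m[:, i]))
#   fitB_i, fitA_i = np.polyfit(logRho, logE, deg=1)   # slope = B, intercept = A
#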
timeStart = os.times()
fitA_dEion = np.zeros(nVion) # dimensionless
fitB_dEion = np.zeros(nVion) # dimensionless
rhoInitFit_dEion = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_m_fit = np.zeros((nImpctPrmtr,nVion))
funcHi2_dEion = np.zeros(nVion)
fitA_dEion,fitB_dEion,funcHi2_dEion,rhoInitFit_dEion, deltaEnrgIon_m_fit = \
fitting(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m)
dPosA_dEion = np.zeros(nVion)
dNegA_dEion = np.zeros(nVion)
dPosA_dEion,dNegA_dEion = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m_fit,fitA_dEion,fitB_dEion,funcHi2_dEion,1,2)
dPosB_dEion = np.zeros(nVion)
dNegB_dEion = np.zeros(nVion)
dPosB_dEion,dNegB_dEion = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m_fit,fitA_dEion,fitB_dEion,funcHi2_dEion,2,2)
# print ('Fitting for deltaEion:')
# for i in range(nVion):
# print ('i=%2d: fitA_dEion = %e (+%e,-%e), fitB_dEion = %e (+%e,-%e), hi2_1 = %e' % \
# (i,fitA_dEion[i],dPosA_dEion[i],dNegA_dEion[i], \
# fitB_dEion[i],dPosB_dEion[i],dNegB_dEion[i],funcHi2_dEion[i]))
#
# Analytical Integration of the fitted dependence 10**A*rho**B.
#
# For this dependence on rho:
#
# Friction force = 2*pi*10**A*n_e*integral_rhoMin^rhoMax (rho**B*rho)*dRho =
# = 2*pi*10**A*n_e/(B+2)*[rhoMax**(B+2)-rhoMin**(B+2)] (dimension = eV/cm):
#
frctnForce_AI = np.zeros(nVion)
for i in range(nVion):
factorA1 = math.pow(10.,fitA_dEion[i])
factorB1 = 2.+fitB_dEion[i]
frctnForce_AI[i] = 2.*pi*n_e*100.*factorA1/factorB1* \
(math.pow(impctPrmtrMax[i],factorB1)- \
math.pow(impctPrmtrMin,factorB1)) # eV/m
timeEnd = os.times()
timeFitting = float(timeEnd[0])-float(timeStart[0]) # CPU time , sec
print ('Time of fitting and integration = %6.3f seconds' % timeFitting)
#
# Dependences of transferred energy to ion on ion velocity for
# different initial impact parameters:
#
rhoSlctd = [.004,.02,.06,.1]
nRhoSlctd = len(rhoSlctd)
deltaEnrgIon_dpnd_Vi = np.zeros((nRhoSlctd,nVion))
npStart = np.zeros((nRhoSlctd,), dtype=int)
for k in range(nRhoSlctd):
slctdFlag = 0
for i in range(nVion):
if (slctdFlag == 0):
for n in range(nImpctPrmtr):
if (rhoInit[n,i] >= rhoSlctd[k]):
npStart[k] = i
slctdFlag = 1
break
for k in range(nRhoSlctd):
for i in range(npStart[k],nVion,1):
factorA = math.pow(10.,fitA_dEion[i])
deltaEnrgIon_dpnd_Vi[k,i] = factorA*math.pow(rhoSlctd[k],fitB_dEion[i])
# print ('deltaEnrgIon_dpnd_Vi[%d,%d] = %e' %(k,i,deltaEnrgIon_dpnd_Vi[k,i]))
#===================================================
#
# Fitting of deltaPz_m (these values are always > 0) !!!
#
#===================================================
#
# Fitting for figures with deltaPz_m (my own Least Squares Method - LSM;
# Python has its own routine for LSM - see
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
#
# Fitted function:
#
# deltaPz_m = 10^fitA_pz * rho^fitB_pz,
# so that
#
# log10(deltaPz_m) = fitB_pz*log10(rho) + fitA_pz
#
# So, the dimension of expression (10^fitA_pz * rho^fitB_pz) is the same
# as deltaPz_m, i.e. g*cm/sec
#
fitA_pz = np.zeros(nVion) # dimensionless
fitB_pz = np.zeros(nVion) # dimensionless
rhoInitFit_pz = np.zeros((nImpctPrmtr,nVion))
deltaPz_m_fit = np.zeros((nImpctPrmtr,nVion))
fitA_pz,fitB_pz,funcHi2_pz,rhoInitFit_pz, deltaPz_m_fit = \
fitting(nImpctPrmtr,nVion,rhoInit,deltaPz_m)
dPosA_pz = np.zeros(nVion)
dNegA_pz = np.zeros(nVion)
dPosA_pz,dNegA_pz = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPz_m_fit,fitA_pz,fitB_pz,funcHi2_pz,1,2)
dPosB_pz = np.zeros(nVion)
dNegB_pz = np.zeros(nVion)
dPosB_pz,dNegB_pz = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPz_m_fit,fitA_pz,fitB_pz,funcHi2_pz,2,2)
# print ('Fitting for deltaPz_m:')
# for i in range(nVion):
# print ('i=%2d: fitA_pz = %e (+%e,-%e), fitB_pz = %e (+%e,-%e), hi2_1 = %e' % \
# (i,fitA_pz[i],dPosA_pz[i],dNegA_pz[i], \
# fitB_pz[i],dPosB_pz[i],dNegB_pz[i],funcHi2_pz[i]))
# print ('<fitA_pz> = %e +- %e' % (mean(fitA_pz),mean(dNegA_pz)))
# print ('<fitB_pz> = %e +- %e' % (mean(fitB_pz),mean(dNegB_pz)))
#===================================================
#
# Fitting of deltaPx_m (these values are always > 0) !!!
#
#===================================================
#
rhoInitFit_px = np.zeros((nImpctPrmtr,nVion))
deltaPx_m_fit = np.zeros((nImpctPrmtr,nVion))
funcHi2_px = np.zeros(nVion)
fitA_px = np.zeros(nVion) # dimensionless
fitB_px = np.zeros(nVion) # dimensionless
fitA_px,fitB_px,funcHi2_px,rhoInitFit_px, deltaPx_m_fit = \
fitting(nImpctPrmtr,nVion,rhoInit,deltaPx_m)
dPosA_px = np.zeros(nVion)
dNegA_px = np.zeros(nVion)
dPosA_px,dNegA_px = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPx_m_fit,fitA_px,fitB_px,funcHi2_px,1,2)
dPosB_px = np.zeros(nVion)
dNegB_px = np.zeros(nVion)
dPosB_px,dNegB_px = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPx_m_fit,fitA_px,fitB_px,funcHi2_px,2,2)
# print ('Fitting for deltaPx_m:')
# for i in range(nVion):
# print ('i=%2d: fitA_px = %e (+%e,-%e), fitB_px = %e (+%e,-%e), hi2_1 = %e' % \
# (i,fitA_px[i],dPosA_px[i],dNegA_px[i], \
# fitB_px[i],dPosB_px[i],dNegB_px[i],funcHi2_px[i]))
xLimit = [1.015*np.log10(VionRel[0]),.95*np.log10(VionRel[nVion-1])]
yLimMin = 0.
yLimMax = 10.*min(fitA_pz)
if (min(fitA_pz) > 0):
yLimMin = 10.*max(fitA_pz)
yLimMax = 0.
for i in range(nVion):
if (fitA_pz[i] - dNegA_pz[i]) < yLimMin:
yLimMin = fitA_pz[i] - dNegA_pz[i]
if (fitA_pz[i] + dPosA_pz[i]) > yLimMax:
yLimMax = fitA_pz[i] + dPosA_pz[i]
# print ('Exponent A (pz): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.25,yLimMax+.25]
if (plotFigureFlag == 0):
fig3000=plt.figure (3000)
plt.errorbar(np.log10(VionRel),fitA_pz,yerr=[dNegA_pz,dPosA_pz],fmt='-ro', \
ecolor='b',capsize=5,capthick=1)
plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
plt.ylabel('Exponent $A$', color='m',fontsize=14)
titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
titleHeader += '$\Delta P_z$ = $10^A\cdot rho^B$'
plt.title(titleHeader,color='m',fontsize=12)
plt.text(-3.75,-26.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
plt.text(-4.0,-28.,('<A>=%7.3f $\pm$ %5.3f' % (mean(fitA_pz),mean(dNegA_pz))), \
color='r',fontsize=16)
# plt.text(-3.25,-29.65,('$-$%5.3f' % (mean(dNegA_pz))),color='r',fontsize=12)
# plt.text(-3.25,-29.15,('$+$%5.3f' % (mean(dPosA_pz))),color='r',fontsize=12)
plt.xlim(xLimit)
plt.ylim(yLimit)
plt.grid(True)
plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
plt.text(-2.55,-28.25,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
plt.text(-4.24,-28.25,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3000.savefig('picturesCMA_v7/fitA_dPz_fig3000cma.png')
print ('File "picturesCMA_v7/fitA_dPz_fig3000cma.png" is written')
yLimMin = 0.
yLimMax = 10.*min(fitB_pz)
if (min(fitB_pz) > 0):
yLimMin = 10.*max(fitB_pz)
yLimMax = 0.
for i in range(nVion):
if (fitB_pz[i] - dNegB_pz[i]) < yLimMin:
yLimMin = fitB_pz[i] - dNegB_pz[i]
if (fitB_pz[i] + dPosB_pz[i]) > yLimMax:
yLimMax = fitB_pz[i] + dPosB_pz[i]
# print ('Exponent B (pz): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.1,yLimMax+.1]
if (plotFigureFlag == 0):
fig3010=plt.figure (3010)
plt.errorbar(np.log10(VionRel),fitB_pz,yerr=[dNegB_pz,dPosB_pz],fmt='-ro', \
ecolor='b',capsize=5,capthick=1)
plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
plt.ylabel('Exponent $B$', color='m',fontsize=14)
titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
titleHeader += '$\Delta P_z$ = $10^A\cdot rho^B$'
plt.title(titleHeader,color='m',fontsize=12)
plt.text(-3.75,-.87,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
plt.text(-3.9,-1.55,('<B>=%6.3f $\pm$ %5.3f' % (mean(fitB_pz),mean(dNegB_pz))), \
color='r',fontsize=16)
# plt.text(-2.85,-2.25,('$-$%5.3f' % (mean(dNegB_pz))),color='r',fontsize=12)
# plt.text(-2.85,-1.75,('$+$%5.3f' % (mean(dPosB_pz))),color='r',fontsize=12)
plt.xlim(xLimit)
plt.ylim(yLimit)
plt.grid(True)
plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
plt.text(-2.55,-1.74,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
plt.text(-4.24,-1.74,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3010.savefig('picturesCMA_v7/fitB_dPz_fig3010cma.png')
print ('File "picturesCMA_v7/fitB_dPz_fig3010cma.png" is written')
yLimMin = 0.
yLimMax = 10.*min(fitA_px)
if (min(fitA_px) > 0):
yLimMin = 10.*max(fitA_px)
yLimMax = 0.
for i in range(nVion):
if (fitA_px[i] - dNegA_px[i]) < yLimMin:
yLimMin = fitA_px[i] - dNegA_px[i]
if (fitA_px[i] + dPosA_px[i]) > yLimMax:
yLimMax = fitA_px[i] + dPosA_px[i]
# print ('Exponent A (px): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.15,yLimMax+.15]
if (plotFigureFlag == 0):
fig3020=plt.figure (3020)
plt.errorbar(np.log10(VionRel),fitA_px,yerr=[dNegA_px,dPosA_px],fmt='-ro', \
ecolor='b',capsize=5,capthick=1)
plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
plt.ylabel('Exponent $A$', color='m',fontsize=14)
titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
titleHeader += '$\Delta P_x$ = $10^A\cdot rho^B$'
plt.title(titleHeader,color='m',fontsize=12)
plt.text(-3.75,-24.2,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
plt.text(-3.9,-24.8,('<A>=%6.3f $\pm$ %5.3f' % (mean(fitA_px),mean(dNegA_px))), \
color='r',fontsize=16)
plt.xlim(xLimit)
plt.ylim(yLimit)
plt.grid(True)
plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
plt.text(-2.55,-25.05,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
    plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
import numpy as np
import cv2
import cv2.aruco as aruco
import math
"""
**************************************************************************
* E-Yantra Robotics Competition
* ================================
* This software is intended to check version compatibility of open source software
* Theme: Thirsty Crow
* MODULE: Task1.1
* Filename: detect.py
* Version: 1.0.0
* Date: October 31, 2018
*
* Author: e-Yantra Project, Department of Computer Science
* and Engineering, Indian Institute of Technology Bombay.
*
* Software released under Creative Commons CC BY-NC-SA
*
* For legal information refer to:
* http://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
*
*
* This software is made available on an “AS IS WHERE IS BASIS”.
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD project under National Mission on Education using
* ICT(NMEICT)
*
**************************************************************************
"""
####################### Define Utility Functions Here ##########################
"""
Function Name : getCameraMatrix()
Input: None
Output: camera_matrix, dist_coeff
Purpose: Loads the camera calibration file provided and returns the camera and
distortion matrix saved in the calibration file.
"""
def getCameraMatrix():
with np.load('Camera.npz') as X:
camera_matrix, dist_coeff, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
return camera_matrix, dist_coeff
"""
Function Name : sin()
Input: angle (in degrees)
Output: value of sine of angle specified
Purpose: Returns the sine of angle specified in degrees
"""
def sin(angle):
return math.sin(math.radians(angle))
"""
Function Name : cos()
Input: angle (in degrees)
Output: value of cosine of angle specified
Purpose: Returns the cosine of angle specified in degrees
"""
def cos(angle):
return math.cos(math.radians(angle))
################################################################################
"""
Function Name : detect_markers()
Input: img (numpy array), camera_matrix, dist_coeff
Output: aruco list in the form [(aruco_id_1, centre_1, rvec_1, tvec_1),(aruco_id_2,
centre_2, rvec_2, tvec_2), ()....]
Purpose: This function takes the image in form of a numpy array, camera_matrix and
distortion matrix as input and detects ArUco markers in the image. For each
		ArUco marker detected in the image, parameters such as ID, centre coordinates, rvec
and tvec are calculated and stored in a list in a prescribed format. The list
is returned as output for the function
"""
def detect_markers(img, camera_matrix, dist_coeff):
markerLength = 100
aruco_list = []
######################## INSERT CODE HERE ########################
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
parameters = aruco.DetectorParameters_create()
	corners, ids, _ = aruco.detectMarkers(gray, aruco_dict, parameters = parameters)
rvec, tvec,_= aruco.estimatePoseSingleMarkers(corners, markerLength, camera_matrix, dist_coeff)
j=0
for i in ids:
Xc=(corners[j][0][0][0]+corners[j][0][1][0]+corners[j][0][2][0]+corners[j][0][3][0])/4
Yc=(corners[j][0][0][1]+corners[j][0][1][1]+corners[j][0][2][1]+corners[j][0][3][1])/4
aruco_centre=(Xc,Yc)
rvect = np.array([rvec[j].reshape(1,1,3)])
tvect = np.array([tvec[j].reshape(1,1,3)])
tup1 = (int(i),aruco_centre,rvect[0],tvect[0])
aruco_list.append(tup1)
j=j+1
print(aruco_list)
##################################################################
return aruco_list
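# Hedged usage sketch (illustrative, not part of the original task file; the
# image path is an assumption):
#
#   camera_matrix, dist_coeff = getCameraMatrix()
#   img = cv2.imread('test_image.png')
#   aruco_list = detect_markers(img, camera_matrix, dist_coeff)
#   for aruco_id, centre, rvec, tvec in aruco_list:
#       img = drawAxis(img, aruco_list, aruco_id, camera_matrix, dist_coeff)
#   cv2.imshow('markers', img); cv2.waitKey(0)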
"""
Function Name : drawAxis()
Input: img (numpy array), aruco_list, aruco_id, camera_matrix, dist_coeff
Output: img (numpy array)
Purpose: This function takes the above specified outputs and draws 3 mutually
perpendicular axes on the specified aruco marker in the image and
returns the modified image.
"""
def drawAxis(img, aruco_list, aruco_id, camera_matrix, dist_coeff):
for x in aruco_list:
if aruco_id == x[0]:
rvec, tvec = x[2], x[3]
markerLength = 100
m = markerLength/2
pts = np.float32([[-m,m,0],[m,m,0],[-m,-m,0],[-m,m,m]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = pt_dict[tuple(pts[1])];
dst2 = pt_dict[tuple(pts[2])]; dst3 = pt_dict[tuple(pts[3])];
img = cv2.line(img, src, dst1, (0,255,0), 4)
img = cv2.line(img, src, dst2, (255,0,0), 4)
img = cv2.line(img, src, dst3, (0,0,255), 4)
return img
"""
Function Name : drawCube()
Input: img (numpy array), aruco_list, aruco_id, camera_matrix, dist_coeff
Output: img (numpy array)
Purpose: This function takes the specified inputs and draws a cube
on the specified aruco marker in the image and returns the modified
image.
"""
def drawCube(img, ar_list, ar_id, camera_matrix, dist_coeff):
for x in ar_list:
if ar_id == x[0]:
rvec, tvec = x[2], x[3]
markerLength = 100
m = markerLength/2
######################## INSERT CODE HERE ########################
pts = np.float32([[-m,m,0],[m,m,0],[-m,-m,0],[-m,m,m]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = pt_dict[tuple(pts[1])];
dst2 = pt_dict[tuple(pts[2])]; dst3 = pt_dict[tuple(pts[3])];
img = cv2.line(img, src, dst1, (0,0,255), 4)
img = cv2.line(img, src, dst2, (0,0,255), 4)
img = cv2.line(img, src, dst3, (0,0,255), 4)
			pts = np.float32([[m,-m,0],[m,m,0],[-m,-m,0],[m,-m,m]])
#!/usr/bin/env python
# On 20130210, v0.2
# Critical Line Algorithm
# by MLdP <<EMAIL>>
import numpy as np
#---------------------------------------------------------------
#---------------------------------------------------------------
class CLA:
def __init__(self,mean,covar,lB,uB):
# Initialize the class
self.mean=mean
self.covar=covar
self.lB=lB
self.uB=uB
self.w=[] # solution
self.l=[] # lambdas
self.g=[] # gammas
self.f=[] # free weights
#---------------------------------------------------------------
def solve(self):
tmp = 100.
# Compute the turning points,free sets and weights
f,w=self.initAlgo()
self.w.append(np.copy(w)) # store solution
self.l.append(None)
self.g.append(None)
self.f.append(f[:])
while True:
#1) case a): Bound one free weight
l_in=float('-Inf')
if len(f)>1:
covarF,covarFB,meanF,wB=self.getMatrices(f)
covarF_inv=np.linalg.inv(covarF)
j=0
for i in f:
l,bi=self.computeLambda(covarF_inv,covarFB,meanF,wB,j,[self.lB[i],self.uB[i]])
if l>l_in:l_in,i_in,bi_in=l,i,bi
j+=1
#2) case b): Free one bounded weight
l_out=float('-Inf')
if len(f)<self.mean.shape[0]:
b=self.getB(f)
for i in b:
covarF,covarFB,meanF,wB=self.getMatrices(f+[i])
covarF_inv=np.linalg.inv(covarF)
l,bi=self.computeLambda(covarF_inv,covarFB,meanF,wB,meanF.shape[0]-1, \
self.w[-1][i])
if self.l == [None] :
# print ('self.l is None\n')
self.l = [float('-Inf')]
# print (self.l[-1], l, l_out)
if (self.l[-1]==float('-Inf') or l<self.l[-1]) and l>l_out:
l_out,i_out=l,i
# print ('\n***** 2a *****\n', l_in, i_out, '\n')
# print ('\n***** 2b *****\n', l_in, i_out, '\n')
if (l_in==None or l_in<0) and (l_out==None or l_out<0):
#print ('\n***** 3 *****\n', l_in, l_out, '\n')
#3) compute minimum variance solution
self.l.append(0)
covarF,covarFB,meanF,wB=self.getMatrices(f)
            covarF_inv=np.linalg.inv(covarF)
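#
# Hedged usage sketch (illustrative; initAlgo, getMatrices, getB and
# computeLambda are defined elsewhere in the full CLA class):
#
#   mean  = np.array([[0.10], [0.20], [0.15]])
#   covar = np.array([[0.04, 0.01, 0.00],
#                     [0.01, 0.09, 0.02],
#                     [0.00, 0.02, 0.06]])
#   lB, uB = np.zeros((3, 1)), np.ones((3, 1))
#   cla = CLA(mean, covar, lB, uB)
#   cla.solve()       # turning-point portfolios accumulate in cla.w
#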
import sys
import os
import numpy
import numpy as np
nimages = 800
ntiles_h = 2
ntiles_w = 3
nbudget_fraction = 0.25
tile_size = 320
pyramid_file_prefix = 'tiles_var_pyramid'
saliency_file_prefix = 'tiles_saliency'
pyramid_scales = [6, 3, 0]
max_sample_rate = 4
# using saliency, pyramid scale 4, 2, 0 on mean response, rescale first 25% nonzero score to the range of 0 - 1, then sample probability in x
# each partition samples 300 tiles
def main():
img_dir = sys.argv[1]
cam_dir = sys.argv[2]
mode = sys.argv[3]
pyramid_file = os.path.join(img_dir, pyramid_file_prefix + '_0_2.npy' )
if not os.path.exists(pyramid_file):
pyramid_file = os.path.join(img_dir, pyramid_file_prefix + '.npy')
pyramid_score = np.load(pyramid_file)
saliency_file = os.path.join(img_dir, saliency_file_prefix + '.npy')
saliency_score = np.load(saliency_file)
all_chosen_indices = np.zeros(pyramid_score.shape[0]).astype('bool')
invalid_indices = np.ones(pyramid_score.shape[0]).astype('bool')
indices_vals = []
min_score_pct = 0
allow_random = True
nbudget = int(nbudget_fraction * saliency_score.shape[0])
scores = [saliency_score, pyramid_score[:, 8], pyramid_score[:, 4], pyramid_score[:, 0]]
min_score_pct = 75
allow_random = False
if allow_random:
sample_partition = nbudget // (len(scores) + 1)
else:
sample_partition = nbudget // len(scores)
for current_score in scores:
if not np.allclose(current_score, saliency_score):
# record which indices are valid for this certain metric before drawing random samples
invalid_indices[current_score > 0] = False
# draw random partition first to include fewer less interesting tiles
if allow_random:
sample_prob = np.ones(pyramid_score.shape[0])
sample_prob[invalid_indices] = 0
sample_prob /= np.sum(sample_prob)
sampled_ind = np.random.choice(np.arange(pyramid_score.shape[0]), sample_partition, replace=False, p=sample_prob)
all_chosen_indices[sampled_ind] = True
indices_vals = np.concatenate((indices_vals, sampled_ind)).astype('i')
for current_score in scores:
# mask out indices that are already chosen, so we can sample without replacement
current_score[all_chosen_indices] = 0
max_score = np.max(current_score)
        min_score = np.min(current_score[current_score > 0])
#----------------------------------------------------------------------------------------------------
'''
skmm.py
This file contains the definition of related functions for kernel mean matching
Coded by <NAME>
Date: 2018-11-25
All Rights Reserved.
'''
#----------------------------------------------------------------------------------------------------
import numpy as np
import random
import scipy.linalg as la
from datetime import *
from cala import *
from kernel import *
from nmse import *
class skmm(object):
def __init__(self, X, Y, cY, m, nSam, **kwargs):
self.__X = X
self.__Y = Y
self.__cY = cY
self.__m = m
self.__nSam = nSam
self.__mx = getMean(Y)
self.__xDim, self.__xSam = np.shape(X)
self.__yDim, self.__ySam = np.shape(Y)
self.__cDim, self.__cSam = np.shape(cY)
self.__xMean = getMean(X)
self.__xStd = getStd(X, self.__xMean)
self.__xBeta = getProb(X, self.__xMean, self.__xStd)
self.__kw = getKWidth(X)
self.__Kxx = xysK(X, X, 'Gaussian', self.__kw)
self.__Kxy = xysK(X, Y, 'Gaussian', self.__kw)
#self.__Kxx = xyK(X, X, 'Gaussian')
#self.__Kxy = xyK(X, Y, 'Gaussian')
#def updMean(self, X, mx, Y):
def updMean(self, X, Y):
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
mx = self.__mx
n = xSam + ySam
for i in range(xDim):
mx[i] = mx[i] * xSam
for j in range(ySam):
mx[i] = mx[i] + Y[i][j]
mx[i] = mx[i] / n
self.__mx = mx
return mx
def updY(self, X, tX):
xDim, xSam = np.shape(X)
        tDim, tSam = np.shape(tX)
assert xDim == tDim, 'The dimensionality of X and tX are not identical !'
n = xSam + tSam
Y = np.column_stack((X, tX))
return Y
def getAind(self, X, n):
xDim, xSam = np.shape(X)
        tmk = xysK(X, X, 'Gaussian', self.__kw) # cannot be replaced with self.__Kxy
tm = np.sum(tmk, axis=0)
assert len(tm) == xSam, 'The direction of operation may be incorrect !'
idx = np.argsort(- tm)
ix = idx[0:n]
return ix
def getBind(self, X, n, rn):
        xDim, xSam = np.shape(X)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 11:00:07 2020
@author: <NAME>
"""
import matplotlib.pyplot as plt
from scipy.spatial import distance
from scipy import signal
import numpy as np
# Constants
DEFAULT_NEURONUM = 500
DEFAULT_TEND = 7000
DEFAULT_IDRIVE = 3
DEFAULT_XNUME = 20
DEFAULT_YNUME = 20
DEFAULT_XNUMI = 10
DEFAULT_YNUMI = 10
DEFAULT_DEGREE_EE = 40
DEFAULT_DEGREE_EI = 10
DEFAULT_DEGREE_IE = 400
DEFAULT_DEGREE_II = 100
DEFAULT_WEIGHT_EE = 0.01
DEFAULT_WEIGHT_EI = 0.05
DEFAULT_WEIGHT_IE = 0.04
DEFAULT_WEIGHT_II = 0.04
DEFAULT_TAU_SYN = 3
DEFAULT_GKS_MIN = 0.2
DEFAULT_GKS_MAX = 1.5
# Class
class NeuroNet():
def __init__(self,
neuroNum = DEFAULT_NEURONUM,
tEnd = DEFAULT_TEND,
Idrive = DEFAULT_IDRIVE,
tauSyn = DEFAULT_TAU_SYN,
gKsMin = DEFAULT_GKS_MIN,
gKsMax = DEFAULT_GKS_MAX):
'''
Parameters
----------
neuroNum : TYPE, optional
DESCRIPTION. The default is DEFAULT_NEURONUM.
tEnd : TYPE, optional
DESCRIPTION. The default is DEFAULT_TEND.
Idrive : TYPE, optional
DESCRIPTION. The default is DEFAULT_IDRIVE.
tauSyn : TYPE, optional
DESCRIPTION. The default is DEFAULT_TAU_SYN.
Returns
-------
None.
'''
# simulation properties
self.tEnd = tEnd # ms
self.tStep = 0.05 # ms
self.tPoints = np.arange(0,self.tEnd,self.tStep)
# ensemble properties
self.neuroNum = neuroNum
self.Idrive = Idrive*np.ones(shape=(self.neuroNum,1))
# neuronal properties
self.gKsMin = gKsMin
self.gKsMax = gKsMax
self.randomInitialStates()
self.gKs = self.gKsMax
# initial adjMat
self.adjMat = np.zeros(shape=(self.neuroNum,self.neuroNum))
self.Esyn = np.zeros((self.neuroNum,1))
# 0 mV for excitatory synapses;
# -75mV for inhibitory synapses
self.tauSyn = DEFAULT_TAU_SYN*np.ones((self.neuroNum,1)) # ms
def randomInitialStates(self):
self.states = np.random.rand(self.neuroNum,4)
self.states[:,3] = -70 + 40 * self.states[:,3]
return self
def zerolikeInitialStates(self,logV=False):
originalDC = self.Idrive.copy()
originalT = self.tEnd
self.Idrive[:] = -1
self.tEnd = 500
self.tPoints = np.arange(0,self.tEnd,self.tStep) - self.tEnd
self.runSimulation(isNet = False,logV=logV)
if logV: self.tPoints_before = self.tPoints.copy()
self.Idrive = originalDC
self.tEnd = originalT
self.tPoints = np.arange(0,self.tEnd,self.tStep)
return self
def mexicanHat(self,
xNumE = DEFAULT_XNUME,
yNumE = DEFAULT_YNUME,
xNumI = DEFAULT_XNUMI,
yNumI = DEFAULT_YNUMI,
degreeEE = DEFAULT_DEGREE_EE,
degreeEI = DEFAULT_DEGREE_EI,
degreeIE = DEFAULT_DEGREE_IE,
degreeII = DEFAULT_DEGREE_II,
weightEE = DEFAULT_WEIGHT_EE,
weightEI = DEFAULT_WEIGHT_EI,
weightIE = DEFAULT_WEIGHT_IE,
weightII = DEFAULT_WEIGHT_II):
'''
Parameters
----------
xNumE : TYPE, optional
DESCRIPTION. The default is DEFAULT_XNUME.
yNumE : TYPE, optional
DESCRIPTION. The default is DEFAULT_YNUME.
xNumI : TYPE, optional
DESCRIPTION. The default is DEFAULT_XNUMI.
yNumI : TYPE, optional
DESCRIPTION. The default is DEFAULT_YNUMI.
degreeEE : TYPE, optional
DESCRIPTION. The default is DEFAULT_DEGREE_EE.
degreeEI : TYPE, optional
DESCRIPTION. The default is DEFAULT_DEGREE_EI.
weightEE : TYPE, optional
DESCRIPTION. The default is DEFAULT_WEIGHT_EE.
weightEI : TYPE, optional
DESCRIPTION. The default is DEFAULT_WEIGHT_EI.
weightIE : TYPE, optional
DESCRIPTION. The default is DEFAULT_WEIGHT_IE.
weightII : TYPE, optional
DESCRIPTION. The default is DEFAULT_WEIGHT_II.
Returns
-------
None.
'''
self.numE = xNumE * yNumE
self.xNumE,self.yNumE = xNumE,yNumE
self.numI = self.neuroNum - self.numE
self.xNumI,self.yNumI = xNumI,yNumI
if self.numI != xNumI * yNumI:
            print('ERROR: neuroNum - numE must equal xNumI * yNumI')
self.Esyn[-self.numI:,:] = -75 # mV
# assign x, y coordinates
xLocE = np.arange(xNumE) + 0.5 # + 0.5 for periodic condition
yLocE = np.arange(yNumE) + 0.5
xLocE,yLocE = np.meshgrid(xLocE,yLocE)
self.coordsE = np.stack((xLocE.reshape(-1),yLocE.reshape(-1))).T
xLocI = (np.arange(xNumI) + 0.5) * (xNumE / xNumI)
yLocI = (np.arange(yNumI) + 0.5) * (yNumE / yNumI)
xLocI,yLocI = np.meshgrid(xLocI,yLocI)
self.coordsI = np.stack((xLocI.reshape(-1),yLocI.reshape(-1))).T
# compute mexican-hat adjacency matrix
# compute distance matrices
distEE = distance.cdist(self.coordsE,self.coordsE,
lambda a,b: self.computeDist(a,b))
distEI = distance.cdist(self.coordsI,self.coordsE,
lambda a,b: self.computeDist(a,b))
self.distEE = distEE
self.distEI = distEI
# compute adjEE and adjEI
if degreeEE >= self.numE:
adjMatEE = weightEE * np.ones(shape = (self.numE,self.numE))
else:
adjMatEE = np.zeros(shape = (self.numE,self.numE))
adjMatEE[
np.argsort(distEE,axis = 0)[1:degreeEE+1,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeEE,dtype=int) for i in np.arange(self.numE)])
] = weightEE
if degreeEI >= self.numI:
adjMatEI = weightEI * np.ones(shape = (self.numI,self.numE))
else:
adjMatEI = np.zeros(shape = (self.numI,self.numE))
adjMatEI[
np.argsort(distEI,axis = 0)[:degreeEI,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeEI,dtype=int) for i in np.arange(self.numE)])
] = weightEI
# compute adjIE and adjII: all to all connection if degree < # of cells
if degreeIE >= self.numE:
adjMatIE = weightIE * np.ones(shape = (self.numE,self.numI))
else:
distIE = distance.cdist(self.coordsE,self.coordsI,
lambda a,b: self.computeDist(a, b))
adjMatIE = np.zeros(shape = (self.numE,self.numI))
adjMatIE[
np.argsort(distIE,axis=0)[:degreeIE,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeIE,dtype=int) for i in np.arange(self.numI)])
] = weightIE
if degreeII >= self.numI:
adjMatII = weightII * np.ones(shape = (self.numI,self.numI))
else:
distII = distance.cdist(self.coordsI,self.coordsI,
lambda a,b: self.computeDist(a,b))
adjMatII = np.zeros(shape = (self.numI,self.numI))
adjMatII[
np.argsort(distII,axis = 0)[1:degreeII+1,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeII,dtype=int) for i in np.arange(self.numI)])
] = weightII
# finally get the adjMat
self.adjMat = np.vstack((np.hstack((adjMatEE,adjMatIE)),
np.hstack((adjMatEI,adjMatII))))
return self
# compute the euclidean distance with periodic boundary conditions
def computeDist(self,a,b):
bounds = np.array([self.xNumE,self.yNumE])
delta = np.abs(a-b)
delta = np.where(delta > 0.5 * bounds,delta - bounds,delta)
return np.sqrt((delta ** 2).sum(axis = -1))
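    # Hedged mini-example (net is a NeuroNet instance with the default 20x20
    # excitatory sheet): points (1, 1) and (19, 19) are 2*sqrt(2) apart under
    # the periodic wrap, not 18*sqrt(2):
    #
    #   net.computeDist(np.array([1., 1.]), np.array([19., 19.]))  # ~2.83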
def mapGks(self,
r,
releaseLocs = np.array([[0.25,0.25],[0.75,0.75]]),
sharpness = 2):
'''
Parameters
----------
releaseLocs : TYPE, optional
            DESCRIPTION. Release-site coordinates normalized by the x,y ranges;
            the default is np.array([[0.25,0.25],[0.75,0.75]]).
Returns
-------
None.
'''
if releaseLocs.size>0:
self.releaseR = r
self.coordsRelease = np.array([self.xNumE,self.yNumE]) * releaseLocs
distER = (distance.cdist(self.coordsRelease,self.coordsE,
lambda a,b: self.computeDist(a,b))
.min(axis=0).reshape(-1,1))
distIR = (distance.cdist(self.coordsRelease,self.coordsI,
lambda a,b: self.computeDist(a,b))
.min(axis=0).reshape(-1,1))
distToR = np.vstack((distER,distIR))
self.distToR = distToR
sigmoid = lambda x: 1/(1 + np.exp(-x))
# self.sigmoidDistToR = sigmoidDistToR
# sigmoidDistToR -= sigmoidDistToR.min()
self.gKs = self.gKsMin + sigmoid(sharpness*(distToR - r)) * (
self.gKsMax - self.gKsMin)
return self
def runSimulation(self,
isNet = True,
isSTDP = False,
silentSynapse = False,
externalInput = False,
ex_drive_strength = 0.1,
poisson_noise = False,
poisson_rate = 1/200,
poisson_amp = 6,
logV = False):
THRESHOLD_AP = -20 # mV
C = 1 # uf/cm2
v_Na = 55.0 # mV
v_K = -90 # mV
v_L = -60 # mV
g_Na = 24 # mS/cm2
g_Kdr = 3.0 # mS/cm2
g_L = 0.02 # mS/cm2
spikeTimes = np.zeros((self.neuroNum,self.tEnd))
spikeCounts = np.zeros((self.neuroNum,1),dtype=int)
# vPoints = np.zeros(size(tPoints));
channelZ = self.states[:,[0]]
channelH = self.states[:,[1]]
channelN = self.states[:,[2]]
memV = self.states[:,[3]]
if logV:
logCounter = 0
self.vPoints = np.zeros(shape=(self.neuroNum,self.tPoints.size))
# temp current logger
self.iPoints = np.zeros(shape=(self.neuroNum,self.tPoints.size))
colIdx = np.arange(4)
neuroIdx = np.arange(self.neuroNum).reshape(-1,1)
Itotal = self.Idrive
STDPon = False
STDPoff = False
windowIsyn = 20 # ms
### external input ###
if externalInput:
distToRs = []
for releaseId in range(self.num_external_input):
distER = (distance.cdist(self.coordsRelease[[releaseId],:],self.coordsE,
lambda a,b: self.computeDist(a,b))
.reshape(-1,1))
distIR = (distance.cdist(self.coordsRelease[[releaseId],:],self.coordsI,
lambda a,b: self.computeDist(a,b))
.reshape(-1,1))
distToRs.append(np.vstack((distER,
100*np.ones(shape=distIR.shape))))
# self.Idrive = DEFAULT_IDRIVE*np.ones(shape=(self.neuroNum,1))
self.Idrive[distToRs[releaseId]<self.releaseR] = (1+ex_drive_strength) * self.Idrive.min()
### poisson noise ###
if poisson_noise:
poissonRate = poisson_rate #s-1
poissonKickAmp = poisson_amp
poissonKickDur = 1
Ipoisson = 0
# ### temp current logger
# self.meanItotal = 0
for t in self.tPoints:
if logV:
self.vPoints[:,[logCounter]] = memV
self.iPoints[:,[logCounter]] = Itotal
logCounter += 1
# determine synI vector (for sub class NeuroNet)
# and record spike times
isFiring = (memV < THRESHOLD_AP)
if isNet:
EsynMat,memVMat = np.meshgrid(self.Esyn,memV)
expTerm = np.zeros(shape = (self.neuroNum,1))
ithLatestSpike = 1
deltaTs = t - spikeTimes[neuroIdx,spikeCounts-ithLatestSpike]
while ((deltaTs<windowIsyn) & (spikeCounts>ithLatestSpike)).any():
expTerm += ((deltaTs < windowIsyn) &
(spikeCounts>ithLatestSpike)) * np.exp(
-deltaTs /self.tauSyn)
ithLatestSpike += 1
deltaTs = t-spikeTimes[neuroIdx,spikeCounts-ithLatestSpike]
Isyn =self.adjMat * (memVMat - EsynMat) @ expTerm
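                # The line above computes Isyn_i = sum_j w_ij*(V_i - Esyn_j)*
                # sum_{j's spikes in the 20 ms window} exp(-(t - t_spike)/tauSyn),
                # i.e. an exponentially decaying synaptic drive from recent spikes.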
Itotal = self.Idrive - Isyn
# ### temp current logger
# self.meanItotal += Itotal
### poisson noise ###
if poisson_noise:
if not t%poissonKickDur:
Ipoisson = poissonKickAmp * (np.random.rand(self.neuroNum,1)<poissonRate)
Itotal += Ipoisson
# RK4 method
kV = np.tile(memV,4)
kZ = np.tile(channelZ,4)
kH = np.tile(channelH,4)
kN = np.tile(channelN,4)
for colInd in colIdx:
mInf = 1 / (1 + np.exp((-kV[:,[colInd]]-30.0)/9.5))
hInf = 1 / (1 + np.exp((kV[:,[colInd]]+53.0)/7.0))
nInf = 1 / (1 + np.exp((-kV[:,[colInd]]-30.0)/10))
zInf = 1 / (1 + np.exp((-kV[:,[colInd]]-39.0)/5.0))
hTau = 0.37 + 2.78 / (1 + np.exp((kV[:,[colInd]]+40.5)/6))
nTau = 0.37 + 1.85 / (1 + np.exp((kV[:,[colInd]]+27.0)/15))
fh = (hInf - kH[:,[colInd]]) / hTau
fn = (nInf - kN[:,[colInd]]) / nTau
fz = (zInf - kZ[:,[colInd]]) / 75.0
fv = (1/C)*(g_Na*(mInf**3) * kH[:,[colInd]] *
(v_Na-kV[:,[colInd]]) +
g_Kdr*(kN[:,[colInd]]**4) * (v_K - kV[:,[colInd]])+
self.gKs * kZ[:,[colInd]] * (v_K - kV[:,[colInd]])+
g_L*(v_L-kV[:,[colInd]]) + Itotal)
kH[:,[colInd]] = self.tStep*fh
kN[:,[colInd]] = self.tStep*fn
kZ[:,[colInd]] = self.tStep*fz
kV[:,[colInd]] = self.tStep*fv
if colInd == 0 or colInd == 1:
kH[:,[colInd+1]] = kH[:,[colInd+1]] + 0.5*kH[:,[colInd]]
kN[:,[colInd+1]] = kN[:,[colInd+1]] + 0.5*kN[:,[colInd]]
kZ[:,[colInd+1]] = kZ[:,[colInd+1]] + 0.5*kZ[:,[colInd]]
kV[:,[colInd+1]] = kV[:,[colInd+1]] + 0.5*kV[:,[colInd]]
elif colInd == 2:
kH[:,[colInd+1]] = kH[:,[colInd+1]] + kH[:,[colInd]]
kN[:,[colInd+1]] = kN[:,[colInd+1]] + kN[:,[colInd]]
kZ[:,[colInd+1]] = kZ[:,[colInd+1]] + kZ[:,[colInd]]
kV[:,[colInd+1]] = kV[:,[colInd+1]] + kV[:,[colInd]]
memV = memV + (kV[:,[0]] + 2 * kV[:,[1]] +
2 * kV[:,[2]] + kV[:,[3]])/6.0
channelH = channelH + (kH[:,[0]] + 2 * kH[:,[1]] +
2 * kH[:,[2]] + kH[:,[3]])/6.0
channelN = channelN + (kN[:,[0]] + 2 * kN[:,[1]] +
2 * kN[:,[2]] + kN[:,[3]])/6.0
channelZ = channelZ + (kZ[:,[0]] + 2 * kZ[:,[1]] +
2 * kZ[:,[2]] + kZ[:,[3]])/6.0
# RK4 ends
isFiring &= (memV > THRESHOLD_AP)
### STDP part ###
# when STDP turned on, initialize adjMat_max,A+, A-, tau+,tau- etc.
if STDPon: # if STDP rule is taking place
if not STDPoff:
# if STDP has already been turned off, nothing should be done
# STDP rule taking effect here!
if isFiring.any():
# only change weights when at least one cell is firing
# This if statement can not combine with above one
# to make sure keep track of time to turn off STDP
# iteration for get all the terms
# within cutoff STDP time window
ithLatestSpike = 1
deltaTs = t-spikeTimes[neuroIdx,spikeCounts-ithLatestSpike]
# if spikeCounts is zeros then -1 index leads to time at 0
# deltaWeights = 0
deltaWeightsPlus,deltaWeightsMinus = 0,0
### nearest spike
deltaWeightsPlus += (deltaTs < windowSTDP) * np.exp(
-deltaTs / tauPlus)
deltaWeightsMinus += (deltaTs < windowSTDP) * np.exp(
-deltaTs / tauMinus) * 0.5
# STDPAdjMat[idxPostSyn[(isFiring&depressions)[:numPostSyn]],:] -=
STDPAdjMat[idxPostSyn[isFiring[:numPostSyn]],:] += (
deltaWeightConst[idxPostSyn[isFiring[:numPostSyn]],:]
* deltaWeightsPlus[:numPreSyn].T)
STDPAdjMat[:,idxPreSyn[isFiring[:numPreSyn]]] -= (
deltaWeightConst[:,idxPreSyn[isFiring[:numPreSyn]]]
* deltaWeightsMinus[:numPostSyn])
# make sure weights in [0,weightmax]
STDPAdjMat[STDPAdjMat>STDPAdjMatMax] = STDPAdjMatMax[
STDPAdjMat>STDPAdjMatMax]
STDPAdjMat[STDPAdjMat<STDPAdjMatMin] = STDPAdjMatMin[
STDPAdjMat<STDPAdjMatMin]
# STDP update done!
if t>self.tSTDP_off: # time to turn off STDP rule
STDPoff = True
elif isSTDP and t>self.tSTDP_on: # turn on STDP at the right time
STDPon = True
# initialize important STDP parameters
numPreSyn = self.numE # considering all excitatory synapses
numPostSyn = self.numE
                idxPreSyn = np.arange(numPreSyn)
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
"""This module provides some operators for finite volume discretizations."""
from functools import partial
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, dia_matrix
from pymor.algorithms.preassemble import preassemble as preassemble_
from pymor.algorithms.timestepping import ExplicitEulerTimeStepper, ImplicitEulerTimeStepper
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.analyticalproblems.functions import Function, LincombFunction
from pymor.analyticalproblems.instationary import InstationaryProblem
from pymor.core.base import abstractmethod
from pymor.core.defaults import defaults
from pymor.tools.deprecated import Deprecated
from pymor.discretizers.builtin.domaindiscretizers.default import discretize_domain_default
from pymor.discretizers.builtin.grids.interfaces import GridWithOrthogonalCenters
from pymor.discretizers.builtin.grids.referenceelements import line, triangle, square
from pymor.discretizers.builtin.grids.subgrid import SubGrid, make_sub_grid_boundary_info
from pymor.discretizers.builtin.gui.visualizers import PatchVisualizer, OnedVisualizer
from pymor.discretizers.builtin.quadratures import GaussQuadratures
from pymor.models.basic import StationaryModel, InstationaryModel
from pymor.operators.constructions import ComponentProjectionOperator, LincombOperator, ZeroOperator
from pymor.operators.interface import Operator
from pymor.operators.numpy import NumpyGenericOperator, NumpyMatrixBasedOperator, NumpyMatrixOperator
from pymor.parameters.base import ParametricObject
from pymor.vectorarrays.numpy import NumpyVectorSpace
def FVVectorSpace(grid, id='STATE'):
return NumpyVectorSpace(grid.size(0), id)
class NumericalConvectiveFlux(ParametricObject):
"""Interface for numerical convective fluxes for finite volume schemes.
Numerical fluxes defined by this interfaces are functions of
the form `F(U_inner, U_outer, unit_outer_normal, edge_volume, mu)`.
The flux evaluation is vectorized and happens in two stages:
1. `evaluate_stage1` receives a |NumPy array| `U` of all values which
appear as `U_inner` or `U_outer` for all edges the flux shall be
evaluated at and returns a `tuple` of |NumPy arrays|
each of the same length as `U`.
2. `evaluate_stage2` receives the reordered `stage1_data` for each
edge as well as the unit outer normal and the volume of the edges.
`stage1_data` is given as follows: If `R_l` is `l`-th entry of the
`tuple` returned by `evaluate_stage1`, the `l`-th entry `D_l` of
of the `stage1_data` tuple has the shape `(num_edges, 2) + R_l.shape[1:]`.
If for edge `k` the values `U_inner` and `U_outer` are the `i`-th
and `j`-th value in the `U` array provided to `evaluate_stage1`,
we have ::
D_l[k, 0] == R_l[i], D_l[k, 1] == R_l[j].
`evaluate_stage2` returns a |NumPy array| of the flux evaluations
for each edge.
"""
@abstractmethod
def evaluate_stage1(self, U, mu=None):
pass
@abstractmethod
def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
pass
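# Hedged sketch (not part of pyMOR): a minimal central flux implementing the
# two-stage protocol above, to make the stage1/stage2 data layout concrete.
# `flux` is assumed to be a vector-valued function of (U, mu) as in
# LaxFriedrichsFlux.
class _CentralFluxExample(NumericalConvectiveFlux):

    def __init__(self, flux):
        self.flux = flux

    def evaluate_stage1(self, U, mu=None):
        # one result array per value in U; reordered later to (num_edges, 2, dim)
        return (self.flux(U[..., np.newaxis], mu),)

    def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
        F, = stage1_data
        # average the inner/outer fluxes and project onto the unit outer normal
        return np.sum(np.mean(F, axis=1) * unit_outer_normals, axis=1) * volumes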
class LaxFriedrichsFlux(NumericalConvectiveFlux):
"""Lax-Friedrichs numerical flux.
If `f` is the analytical flux, the Lax-Friedrichs flux `F` is given
by::
F(U_in, U_out, normal, vol) = vol * [normal⋅(f(U_in) + f(U_out))/2 + (U_in - U_out)/(2*λ)]
Parameters
----------
flux
|Function| defining the analytical flux `f`.
lxf_lambda
The stabilization parameter `λ`.
"""
def __init__(self, flux, lxf_lambda=1.0):
self.__auto_init(locals())
def evaluate_stage1(self, U, mu=None):
return U, self.flux(U[..., np.newaxis], mu)
def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
U, F = stage1_data
return (np.sum(np.sum(F, axis=1) * unit_outer_normals, axis=1) * 0.5
+ (U[..., 0] - U[..., 1]) * (0.5 / self.lxf_lambda)) * volumes
class SimplifiedEngquistOsherFlux(NumericalConvectiveFlux):
"""Engquist-Osher numerical flux. Simplified Implementation for special case.
For the definition of the Engquist-Osher flux see :class:`EngquistOsherFlux`.
This class provides a faster and more accurate implementation for the special
case that `f(0) == 0` and the derivative of `f` only changes sign at `0`.
Parameters
----------
flux
|Function| defining the analytical flux `f`.
flux_derivative
|Function| defining the analytical flux derivative `f'`.
"""
def __init__(self, flux, flux_derivative):
self.__auto_init(locals())
def evaluate_stage1(self, U, mu=None):
return self.flux(U[..., np.newaxis], mu), self.flux_derivative(U[..., np.newaxis], mu)
def evaluate_stage2(self, stage1_data, unit_outer_normals, volumes, mu=None):
F_edge, F_d_edge = stage1_data
unit_outer_normals = unit_outer_normals[:, np.newaxis, :]
        F_d_edge = np.sum(F_d_edge * unit_outer_normals, axis=2)
import sys, random, os
import bpy, bpy_extras
# """
# Some utility functions for interacting with Blender
# """
import numpy as np
import math
from mathutils import Matrix
from skimage import measure  # needed by create_sub_mask_annotation below
from shapely.geometry import Polygon, MultiPolygon  # needed by create_sub_mask_annotation below
def binary_mask_to_rle(binary_mask):
rle = {'counts': [], 'size': list(binary_mask.shape)}
counts = rle.get('counts')
last_elem = 0
running_length = 0
for i, elem in enumerate(binary_mask.ravel(order='F')):
if elem == last_elem:
pass
else:
counts.append(running_length)
running_length = 0
last_elem = elem
running_length += 1
counts.append(running_length)
return rle
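# Example (tiny hypothetical mask): counts are run lengths in column-major order,
# starting with the number of leading zeros:
#   binary_mask_to_rle(np.array([[0, 1], [1, 1]]))
#   -> {'counts': [1, 3], 'size': [2, 2]}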
def create_sub_mask_annotation(sub_mask):
# Find contours (boundary lines) around each sub-mask
# Note: there could be multiple contours if the object
# is partially occluded. (E.g. an elephant behind a tree)
contours = measure.find_contours(sub_mask, 0.5, positive_orientation='low')
segmentations = []
polygons = []
for contour in contours:
# Flip from (row, col) representation to (x, y)
# and subtract the padding pixel
for i in range(len(contour)):
row, col = contour[i]
contour[i] = (col - 1, row - 1)
# Make a polygon and simplify it
poly = Polygon(contour)
poly = poly.simplify(1.0, preserve_topology=False)
polygons.append(poly)
segmentation = np.array(poly.exterior.coords).ravel().tolist()
segmentations.append(segmentation)
# Combine the polygons to calculate the bounding box and area
multi_poly = MultiPolygon(polygons)
x, y, max_x, max_y = multi_poly.bounds
width = max_x - x
height = max_y - y
bbox = (x, y, width, height)
area = multi_poly.area
# annotation = {
# 'segmentation': segmentations,
# 'iscrowd': is_crowd,
# 'image_id': image_id,
# 'category_id': category_id,
# 'id': annotation_id,
# 'bbox': bbox,
# 'area': area
# }
return segmentations, bbox, area
def extract_args(input_argv=None):
"""
Pull out command-line arguments after "--". Blender ignores command-line flags
after --, so this lets us forward command line arguments from the blender
invocation to our own script.
"""
if input_argv is None:
input_argv = sys.argv
output_argv = []
if '--' in input_argv:
idx = input_argv.index('--')
output_argv = input_argv[(idx + 1):]
return output_argv
def parse_args(parser, argv=None):
return parser.parse_args(extract_args(argv))
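# Example: for an invocation like
#   blender --background --python render.py -- --num_images 10
# sys.argv contains ['blender', ..., '--', '--num_images', '10'], so
# extract_args() returns ['--num_images', '10'].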
# I wonder if there's a better way to do this?
def delete_object(obj):
""" Delete a specified blender object """
for o in bpy.data.objects:
o.select = False
obj.select = True
bpy.ops.object.delete()
def get_camera_coords(cam, pos):
"""
For a specified point, get both the 3D coordinates and 2D pixel-space
coordinates of the point from the perspective of the camera.
Inputs:
- cam: Camera object
- pos: Vector giving 3D world-space position
Returns a tuple of:
- (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
in the range [-1, 1]
"""
scene = bpy.context.scene
x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
scale = scene.render.resolution_percentage / 100.0
w = int(scale * scene.render.resolution_x)
h = int(scale * scene.render.resolution_y)
px = int(round(x * w))
py = int(round(h - y * h))
return (px, py, z)
def set_layer(obj, layer_idx):
""" Move an object to a particular layer """
# Set the target layer to True first because an object must always be on
# at least one layer.
obj.layers[layer_idx] = True
for i in range(len(obj.layers)):
obj.layers[i] = (i == layer_idx)
def add_object(name, loc, cur_render_dir, theta=0):
"""
    Load an object from a file. We assume that in the directory cur_render_dir, there
    is a file named "$name.blend" which contains a single object named "$name"
    that has unit size and is centered at the origin.
    - loc: tuple (x, y) giving the coordinates on the ground plane where the
      object should be placed.
    - theta: rotation of the object (in radians) about the vertical axis.
"""
files = []
with bpy.data.libraries.load("%s/"%cur_render_dir+name+".blend") as (data_from, data_to):
for n in data_from.objects:
if name in n:
files.append({'name': n})
bpy.ops.wm.append(directory="%s/"%cur_render_dir+name+".blend"+"/Object/", files = files)
# Set the new object as active, then rotate, and translate it
x, y = loc
ctx = bpy.context.copy()
obs = []
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
# prev = bpy.context.area.type
# bpy.context.area.type = 'NODE_EDITOR'
# area = bpy.context.area
# clear default nodes
# for node in tree.nodes:
# tree.nodes.remove(node)
render = tree.nodes['Render Layers']
links = tree.links
output_node = bpy.context.scene.node_tree.nodes.new('CompositorNodeOutputFile')
output_node.base_path = cur_render_dir
# link = links.new(render.outputs["Image"], output_node.inputs[0])
i = 0
minz = 100000.0
for obj in bpy.data.objects:
if name in obj.name:
mx = obj.matrix_world
minz = min(min((mx * v.co)[2] for v in obj.data.vertices), minz)
# mask_node = bpy.context.scene.node_tree.nodes.new('CompositorNodeIDMask')
# mask_node.index = 0
# link = links.new(render.outputs["IndexOB"], mask_node.inputs["ID value"])
# link = links.new(mask_node.outputs[0], output_node.inputs[0])
for obj in bpy.data.objects:
if name in obj.name:
obj.rotation_euler[2] = theta
obj.location = (x, y, -minz+0.02)
# bpy.ops.transform.translate(value=(x, y, -minz))
# mx = obj.matrix_world
# mx.translation.z -= minz
i += 1
obj.pass_index = i
mask_node = bpy.context.scene.node_tree.nodes.new('CompositorNodeIDMask')
mask_node.index = i
link = links.new(render.outputs["IndexOB"], mask_node.inputs["ID value"])
output_node.layer_slots.new(str(i))
link = links.new(mask_node.outputs[0], output_node.inputs[i])
obs.append(obj)
ctx['active_object'] = obj
else:
obj.hide_render = True
bpy.ops.render.render()
# bpy.context.area.type = prev
for node in bpy.context.scene.node_tree.nodes:
if node.name == "Render Layers": continue
bpy.context.scene.node_tree.nodes.remove(node)
ctx['selected_objects'] = obs
bpy.ops.object.join(ctx)
rotation = []
location = []
for obj in bpy.data.objects:
obj.hide_render = False
if name in obj.name:
obj.name = name
obj.pass_index = 0
rotation = obj.rotation_euler
location = obj.location
# mx = obj.matrix_world
# minz = min((mx * v.co)[2] for v in obj.data.vertices)
# mx.translation.z -= minz
bpy.context.scene.objects.active = bpy.data.objects[name]
return location, rotation
def add_object2(name, loc, rot1, normals, tmp_dir, theta=0.0, mode="ground"):
rot0 = np.array([ [1.0000000, 0.0000000, 0.0000000],
[0.0000000, 0.0000000, -1.0000000],
[0.0000000, 1.0000000, 0.0000000 ]])
sr = random.random()
if 'Cart' in name or 'Refrigerator' in name:
sr = 1.01
if mode == 'support' and sr < 0.7:
tilt0 = [[[ 1.0000000, 0.0000000, 0.0000000],
[0.0000000, -1.0000000, 0.0000000],
[0.0000000, 0.0000, -1.0000000]],
[[ 0.0000000, 1.0000000, 0.0000000],
[1.0000000, 0.0000000, 0.0000000],
[0.0000000, 0.0000, -1.0000000]],
[[ 0.0000000, -1.0000000, 0.0000000],
[-1.0000000, 0.0000000, 0.0000000],
[0.0000000, 0.0000, -1.0000000]]
]
rot1 = random.choice(tilt0)
"""
Load an object from a file. We assume that in the directory object_dir, there
is a file named "$name.blend" which contains a single object named "$name"
that has unit size and is centered at the origin.
- scale: scalar giving the size that the object should be in the scene
- loc: tuple (x, y) giving the coordinates on the ground plane where the
object should be placed.
"""
import math
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
        if s == 0:
            # Degenerate case: vec1 and vec2 are parallel (identity is correct) or
            # antiparallel (a 180-degree rotation would be needed; not handled here).
            return np.eye(3)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
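    # Example: aligning the x axis with the y axis gives a 90-degree rotation about z,
    # i.e. rotation_matrix_from_vectors([1, 0, 0], [0, 1, 0]) maps [1, 0, 0] to [0, 1, 0].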
def isclose(x, y, rtol=1.e-5, atol=1.e-8):
return abs(x-y) <= atol + rtol * abs(y)
def euler_angles_from_rotation_matrix(R):
'''
        From a paper by <NAME> (undated),
        "Computing Euler angles from a rotation matrix".
'''
phi = 0.0
if isclose(R[2,0],-1.0):
theta = math.pi/2.0
psi = math.atan2(R[0,1],R[0,2])
elif isclose(R[2,0],1.0):
theta = -math.pi/2.0
psi = math.atan2(-R[0,1],-R[0,2])
else:
theta = -math.asin(R[2,0])
cos_theta = math.cos(theta)
psi = math.atan2(R[2,1]/cos_theta, R[2,2]/cos_theta)
phi = math.atan2(R[1,0]/cos_theta, R[0,0]/cos_theta)
return psi, theta, phi
    def Rz(theta):
        # Assumed helper, not defined in this excerpt: rotation matrix about the z axis.
        return np.array([[math.cos(theta), -math.sin(theta), 0.0],
                         [math.sin(theta), math.cos(theta), 0.0],
                         [0.0, 0.0, 1.0]])
import copy
rot1_copy = rot1.copy()
rot1 = np.array(rot1)
tilt = [[[ 1.0000000, 0.0000000, 0.0000000],
[0.0000000, -1.0000000, 0.0000000],
[0.0000000, 0.0000, -1.0000000]],
[[ -1.0000000, 0.0000000, 0.0000000],
[0.0000000, 1.0000000, 0.0000000],
[-0.0000000, 0.0000000, -1.0000000 ]]]
rot1 = np.array(rot1) @ rot0
if mode == "side wall" or mode == "back wall":
if random.random() < 0.75:
theta = 0.0
rot3 = Rz(theta)
files = []
with bpy.data.libraries.load("%s/"%tmp_dir+name+".blend") as (data_from, data_to):
for n in data_from.objects:
if name in n:
files.append({'name': n})
bpy.ops.wm.append(directory="%s/"%tmp_dir+name+".blend"+"/Object/", files = files)
# Set the new object as active, then rotate, and translate it
x, y = loc
ctx = bpy.context.copy()
obs = []
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
# prev = bpy.context.area.type
# bpy.context.area.type = 'NODE_EDITOR'
# area = bpy.context.area
# clear default nodes
# for node in tree.nodes:
# tree.nodes.remove(node)
render = tree.nodes['Render Layers']
links = tree.links
output_node = bpy.context.scene.node_tree.nodes.new('CompositorNodeOutputFile')
output_node.base_path = tmp_dir
# link = links.new(render.outputs["Image"], output_node.inputs[0])
minz = 100000.0
first_min = []
second_min = []
if 'Bed' in name or 'Chair' in name:
if rot1_copy == tilt[0]:
miny = -100000.0
if rot1_copy == tilt[1]:
miny = 100000.0
names = []
normal = []
for obj in bpy.data.objects:
if name in obj.name:
names.append(obj.name)
maxz = -10000.0
maxx = -10000.0
minx = 10000.0
maxy = -10000.0
miny = 10000.0
for obj in bpy.data.objects:
if name in obj.name:
obj.rotation_euler = euler_angles_from_rotation_matrix(rot1)
bpy.context.scene.update()
mx = obj.matrix_world
if ('Bed' in name or 'Chair' in name) and (rot1_copy in tilt) and not (mode == 'support' and sr < 0.7):
if "headboard" in obj.name or "back" in obj.name:
mz, idx = min([(mx * v.co)[2], idx] for idx, v in enumerate(obj.data.vertices))
first_min = [(mx * v.co) for v in obj.data.vertices][idx]
if ('Bed' in name or 'Chair' in name) and not (mode == 'support' and sr < 0.7):
if rot1_copy == tilt[0]:
if "seat" in obj.name or "sleep" in obj.name:
my, idx = max([(mx * v.co)[1], idx] for idx, v in enumerate(obj.data.vertices))
second_min = [(mx * v.co) for v in obj.data.vertices][idx]
if rot1_copy == tilt[1]:
if "seat" in obj.name or "sleep" in obj.name:
my, idx = min([(mx * v.co)[1], idx] for idx, v in enumerate(obj.data.vertices))
second_min = [(mx * v.co) for v in obj.data.vertices][idx]
if mode == 'normal':
if "top" in obj.name or "sleep" in obj.name or "seat" in obj.name or "arm" in obj.name or 'Refrigerator' in name:
maxz = max(max((mx * v.co)[2] for v in obj.data.vertices), maxz)
                    maxx = max(max((mx * v.co)[0] for v in obj.data.vertices), maxx)
                    minx = min(min((mx * v.co)[0] for v in obj.data.vertices), minx)
                    maxy = max(max((mx * v.co)[1] for v in obj.data.vertices), maxy)
                    miny = min(min((mx * v.co)[1] for v in obj.data.vertices), miny)
normal = [maxz, maxx, minx, maxy, miny]
if ('Bed' in name or 'Chair' in name) and (rot1_copy in tilt) and len(first_min) and len(second_min) and not (mode == 'support' and sr < 0.7):
first_min[0] = 0
second_min[0] = 0
dist = math.sqrt((second_min[1] - first_min[1]) ** 2 + (second_min[2] - first_min[2]) ** 2)
new=[0,0,first_min[2]]
if second_min[1] > first_min[1]:
new[1] = first_min[1] + dist
else:
new[1] = first_min[1] - dist
rot2 = rotation_matrix_from_vectors(np.array(second_min - first_min).tolist(), (np.array(new) - np.array(first_min)).tolist())
        rot = np.array(rot3)
import logging
logger = logging.getLogger(__name__)
import numpy as np
class PrecisionRecall:
def __init__(self, phase, classes):
self.phase = phase # train or val
self.classes = classes
# row is prediction, column is gt
self.confusion_matrix = np.zeros((self.classes, self.classes))
# suppress scientific notation
        np.set_printoptions(suppress=True)
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
from mrinversion.linear_model._base_l1l2 import _get_augmented_data
from mrinversion.linear_model._base_l1l2 import _get_cv_indexes
def test01():
K = np.empty((8, 16))
indexes = _get_cv_indexes(K, 4, "lasso", f_shape=(4, 4))
index_test = [
[[1, 2, 3, 5, 6, 7], [0, 4]],
[[0, 1, 2, 4, 5, 6], [3, 7]],
[[0, 1, 3, 4, 5, 7], [2, 6]],
[[0, 2, 3, 4, 6, 7], [1, 5]],
]
assert indexes == index_test, "test01"
def test02():
K = np.empty((8, 16))
lst = [
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
]
index_test = [
[[1, 2, 3, 5, 6, 7], [0, 4]],
[[0, 1, 2, 4, 5, 6], [3, 7]],
[[0, 1, 3, 4, 5, 7], [2, 6]],
[[0, 2, 3, 4, 6, 7], [1, 5]],
]
indexes = _get_cv_indexes(K, folds=4, regularizer="smooth lasso", f_shape=(4, 4))
index_test_1 = deepcopy(index_test)
for tr_, _ in index_test_1:
tr_ += lst
assert indexes == index_test_1, "test02 - 1"
indexes = _get_cv_indexes(K, 4, "smooth lasso", f_shape=16)
index_test_2 = deepcopy(index_test)
for tr_, _ in index_test_2:
tr_ += lst[:15]
assert indexes == index_test_2, "test02 - 2"
def test03():
# 1d - explicit
K = np.empty((5, 5))
s = np.empty((5, 1))
KK, _ = _get_augmented_data(K, s, 1, "smooth lasso", f_shape=(5))
A = [[1, -1, 0, 0, 0], [0, 1, -1, 0, 0], [0, 0, 1, -1, 0], [0, 0, 0, 1, -1]]
assert np.allclose(KK[5:], A)
# 2d - explicit symmetric
K = np.empty((5, 4))
s = np.empty((5, 1))
KK, _ = _get_augmented_data(K, s, 1, "smooth lasso", f_shape=(2, 2))
J1 = [[1, 0, -1, 0], [0, 1, 0, -1]]
J2 = [[1, -1, 0, 0], [0, 0, 1, -1]]
assert np.allclose(KK[5:7], J1)
assert np.allclose(KK[7:9], J2)
# 2d - explicit asymmetric
K = np.empty((5, 6))
s = np.empty((5, 1))
KK, _ = _get_augmented_data(K, s, 1, "smooth lasso", f_shape=(3, 2))
J1 = [
[1, 0, -1, 0, 0, 0],
[0, 1, 0, -1, 0, 0],
[0, 0, 1, 0, -1, 0],
[0, 0, 0, 1, 0, -1],
]
J2 = [[1, -1, 0, 0, 0, 0], [0, 0, 1, -1, 0, 0], [0, 0, 0, 0, 1, -1]]
assert np.allclose(KK[5:9], J1)
assert np.allclose(KK[9:12], J2)
# 1d - function
K = np.empty((5, 12))
KK, _ = _get_augmented_data(K, s, 1, "smooth lasso", f_shape=(12))
A1 = (-1 * np.eye(12) + np.diag(np.ones(11), k=-1))[1:]
assert np.allclose(KK[5:], A1)
# 2d - function symmetric
K = np.empty((5, 16))
    s = np.empty((5, 1))
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import special
from scipy.interpolate import interp2d
from spectractor.tools import plot_image_simple
from spectractor import parameters
from spectractor.config import set_logger
from spectractor.fit.fitter import FitWorkspace, run_minimisation
from numba import njit
@njit(fastmath=True, cache=True)
def evaluate_moffat1d_unnormalized(y, amplitude, y_c, gamma, alpha): # pragma: nocover
r"""Compute a 1D Moffat function, whose integral is not normalised to unity.
.. math ::
f(y) \propto \frac{A}{\left[ 1 +\left(\frac{y-y_c}{\gamma}\right)^2 \right]^\alpha}
\quad\text{with}, \alpha > 1/2
    Note that this function is defined only for :math:`\alpha > 1/2`. The normalisation factor
    :math:`\frac{\Gamma(\alpha)}{\gamma \sqrt{\pi} \Gamma(\alpha - 1/2)}` is not included as special functions
    are not supported by the numba library.
Parameters
----------
y: array_like
1D array of pixels :math:`y`, regularly spaced.
amplitude: float
Integral :math:`A` of the function.
y_c: float
Center :math:`y_c` of the function.
gamma: float
Width :math:`\gamma` of the function.
alpha: float
Exponent :math:`\alpha` of the Moffat function.
Returns
-------
output: array_like
1D array of the function evaluated on the y pixel array.
Examples
--------
>>> Ny = 50
>>> y = np.arange(Ny)
>>> amplitude = 10
>>> alpha = 2
>>> gamma = 5
>>> a = evaluate_moffat1d_unnormalized(y, amplitude=amplitude, y_c=Ny/2, gamma=gamma, alpha=alpha)
>>> norm = gamma * np.sqrt(np.pi) * special.gamma(alpha - 0.5) / special.gamma(alpha)
>>> a = a / norm
>>> print(f"{np.sum(a):.6f}")
9.967561
.. doctest::
:hide:
        >>> assert np.isclose(np.argmax(a), Ny/2, atol=0.5)
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from spectractor.extractor.psf import *
Ny = 50
y = np.arange(Ny)
amplitude = 10
a = evaluate_moffat1d(y, amplitude=amplitude, y_c=Ny/2, gamma=5, alpha=2)
plt.plot(a)
plt.grid()
plt.xlabel("y")
plt.ylabel("Moffat")
plt.show()
"""
rr = (y - y_c) * (y - y_c)
rr_gg = rr / (gamma * gamma)
a = (1 + rr_gg) ** -alpha
# dx = y[1] - y[0]
# integral = np.sum(a) * dx
# norm = amplitude
# if integral != 0:
# a /= integral
# a *= amplitude
a *= amplitude
return a
@njit(fastmath=True, cache=True)
def evaluate_moffatgauss1d_unnormalized(y, amplitude, y_c, gamma, alpha, eta_gauss, sigma): # pragma: nocover
r"""Compute a 1D Moffat-Gaussian function, whose integral is not normalised to unity.
.. math ::
f(y) \propto A \left\lbrace
\frac{1}{\left[ 1 +\left(\frac{y-y_c}{\gamma}\right)^2 \right]^\alpha}
- \eta e^{-(y-y_c)^2/(2\sigma^2)}\right\rbrace
\quad\text{ and } \quad \eta < 0, \alpha > 1/2
    Note that this function is defined only for :math:`\alpha > 1/2`. The normalisation factor for the Moffat
    :math:`\frac{\Gamma(\alpha)}{\gamma \sqrt{\pi} \Gamma(\alpha - 1/2)}` is not included as special functions
    are not supported by the numba library.
Parameters
----------
y: array_like
1D array of pixels :math:`y`, regularly spaced.
amplitude: float
Integral :math:`A` of the function.
y_c: float
Center :math:`y_c` of the function.
gamma: float
Width :math:`\gamma` of the Moffat function.
alpha: float
Exponent :math:`\alpha` of the Moffat function.
eta_gauss: float
Relative negative amplitude of the Gaussian function.
sigma: float
Width :math:`\sigma` of the Gaussian function.
Returns
-------
output: array_like
1D array of the function evaluated on the y pixel array.
Examples
--------
>>> Ny = 50
>>> y = np.arange(Ny)
>>> amplitude = 10
>>> gamma = 5
>>> alpha = 2
>>> eta_gauss = -0.1
>>> sigma = 1
>>> a = evaluate_moffatgauss1d_unnormalized(y, amplitude=amplitude, y_c=Ny/2, gamma=gamma, alpha=alpha,
... eta_gauss=eta_gauss, sigma=sigma)
>>> norm = gamma*np.sqrt(np.pi)*special.gamma(alpha-0.5)/special.gamma(alpha) + eta_gauss*np.sqrt(2*np.pi)*sigma
>>> a = a / norm
>>> print(f"{np.sum(a):.6f}")
9.966492
.. doctest::
:hide:
>>> assert np.isclose(np.sum(a), amplitude, atol=0.5)
>>> assert np.isclose(np.argmax(a), Ny/2, atol=0.5)
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from spectractor.extractor.psf import *
Ny = 50
y = np.arange(Ny)
amplitude = 10
a = evaluate_moffatgauss1d(y, amplitude=amplitude, y_c=Ny/2, gamma=5, alpha=2, eta_gauss=-0.1, sigma=1)
plt.plot(a)
plt.grid()
plt.xlabel("y")
plt.ylabel("Moffat")
plt.show()
"""
rr = (y - y_c) * (y - y_c)
rr_gg = rr / (gamma * gamma)
a = (1 + rr_gg) ** -alpha + eta_gauss * np.exp(-(rr / (2. * sigma * sigma)))
# dx = y[1] - y[0]
# integral = np.sum(a) * dx
# norm = amplitude
# if integral != 0:
# norm /= integral
# a *= norm
a *= amplitude
return a
@njit(fastmath=True, cache=True)
def evaluate_moffat2d(x, y, amplitude, x_c, y_c, gamma, alpha): # pragma: nocover
r"""Compute a 2D Moffat function, whose integral is normalised to unity.
.. math ::
f(x, y) = \frac{A (\alpha - 1)}{\pi \gamma^2} \frac{1}{
\left[ 1 +\frac{\left(x-x_c\right)^2+\left(y-y_c\right)^2}{\gamma^2} \right]^\alpha}
\quad\text{with}\quad
\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}f(x, y) \mathrm{d}x \mathrm{d}y = A
    Note that this function is defined only for :math:`\alpha > 1`.
Note that the normalisation of a 2D Moffat function is analytical so it is not expected that
the sum of the output array is equal to :math:`A`, but lower.
Parameters
----------
x: array_like
2D array of pixels :math:`x`, regularly spaced.
y: array_like
2D array of pixels :math:`y`, regularly spaced.
amplitude: float
Integral :math:`A` of the function.
x_c: float
X axis center :math:`x_c` of the function.
y_c: float
Y axis center :math:`y_c` of the function.
gamma: float
Width :math:`\gamma` of the function.
alpha: float
Exponent :math:`\alpha` of the Moffat function.
Returns
-------
output: array_like
2D array of the function evaluated on the y pixel array.
Examples
--------
>>> Nx = 50
>>> Ny = 50
>>> yy, xx = np.mgrid[:Ny, :Nx]
>>> amplitude = 10
>>> a = evaluate_moffat2d(xx, yy, amplitude=amplitude, x_c=Nx/2, y_c=Ny/2, gamma=5, alpha=2)
>>> print(f"{np.sum(a):.6f}")
9.683129
.. doctest::
:hide:
>>> assert not np.isclose(np.sum(a), amplitude)
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from spectractor.extractor.psf import *
Nx = 50
Ny = 50
yy, xx = np.mgrid[:Nx, :Ny]
amplitude = 10
a = evaluate_moffat2d(xx, yy, amplitude=amplitude, y_c=Ny/2, x_c=Nx/2, gamma=5, alpha=2)
im = plt.pcolor(xx, yy, a)
plt.grid()
plt.xlabel("x")
plt.ylabel("y")
plt.colorbar(im, label="Moffat 2D")
plt.show()
"""
rr_gg = ((x - x_c) * (x - x_c) / (gamma * gamma) + (y - y_c) * (y - y_c) / (gamma * gamma))
a = (1 + rr_gg) ** -alpha
norm = (np.pi * gamma * gamma) / (alpha - 1)
a *= amplitude / norm
return a
@njit(fastmath=True, cache=True)
def evaluate_moffatgauss2d(x, y, amplitude, x_c, y_c, gamma, alpha, eta_gauss, sigma): # pragma: nocover
r"""Compute a 2D Moffat-Gaussian function, whose integral is normalised to unity.
.. math ::
f(x, y) = \frac{A}{\frac{\pi \gamma^2}{\alpha-1} + 2 \pi \eta \sigma^2}\left\lbrace \frac{1}{
\left[ 1 +\frac{\left(x-x_c\right)^2+\left(y-y_c\right)^2}{\gamma^2} \right]^\alpha}
+ \eta e^{-\left[ \left(x-x_c\right)^2+\left(y-y_c\right)^2\right]/(2 \sigma^2)}
\right\rbrace
.. math ::
\quad\text{with}\quad
\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}f(x, y) \mathrm{d}x \mathrm{d}y = A
\quad\text{and} \quad \eta < 0
    Note that this function is defined only for :math:`\alpha > 1`.
Parameters
----------
x: array_like
2D array of pixels :math:`x`, regularly spaced.
y: array_like
2D array of pixels :math:`y`, regularly spaced.
amplitude: float
Integral :math:`A` of the function.
x_c: float
X axis center :math:`x_c` of the function.
y_c: float
Y axis center :math:`y_c` of the function.
gamma: float
Width :math:`\gamma` of the function.
alpha: float
Exponent :math:`\alpha` of the Moffat function.
eta_gauss: float
Relative negative amplitude of the Gaussian function.
sigma: float
Width :math:`\sigma` of the Gaussian function.
Returns
-------
output: array_like
2D array of the function evaluated on the y pixel array.
Examples
--------
>>> Nx = 50
>>> Ny = 50
>>> yy, xx = np.mgrid[:Ny, :Nx]
>>> amplitude = 10
>>> a = evaluate_moffatgauss2d(xx, yy, amplitude=amplitude, x_c=Nx/2, y_c=Ny/2, gamma=5, alpha=2,
... eta_gauss=-0.1, sigma=1)
>>> print(f"{np.sum(a):.6f}")
9.680573
.. doctest::
:hide:
>>> assert not np.isclose(np.sum(a), amplitude)
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from spectractor.extractor.psf import *
Nx = 50
Ny = 50
yy, xx = np.mgrid[:Nx, :Ny]
amplitude = 10
a = evaluate_moffatgauss2d(xx, yy, amplitude, Nx/2, Ny/2, gamma=5, alpha=2, eta_gauss=-0.1, sigma=1)
im = plt.pcolor(xx, yy, a)
plt.grid()
plt.xlabel("x")
plt.ylabel("y")
plt.colorbar(im, label="Moffat 2D")
plt.show()
"""
rr = ((x - x_c) * (x - x_c) + (y - y_c) * (y - y_c))
rr_gg = rr / (gamma * gamma)
a = (1 + rr_gg) ** -alpha + eta_gauss * np.exp(-(rr / (2. * sigma * sigma)))
norm = (np.pi * gamma * gamma) / (alpha - 1) + eta_gauss * 2 * np.pi * sigma * sigma
a *= amplitude / norm
return a
class PSF:
"""Generic PSF model class.
The PSF models must contain at least the "amplitude", "x_c" and "y_c" parameters as the first three parameters
(in this order) and "saturation" parameter as the last parameter. "amplitude", "x_c" and "y_c"
stands respectively for the general amplitude of the model, the position along the dispersion axis and the
transverse position with respect to the dispersion axis (assumed to be the X axis).
Last "saturation" parameter must be express in the same units as the signal to model and as the "amplitude"
parameter. The PSF models must be normalized to one in total flux divided by the first parameter (amplitude).
Then the PSF model integral is equal to the "amplitude" parameter.
"""
def __init__(self, clip=False):
"""
Parameters
----------
clip: bool, optional
If True, PSF evaluation is clipped between 0 and saturation level (slower) (default: False)
"""
self.my_logger = set_logger(self.__class__.__name__)
self.p = np.array([])
self.param_names = ["amplitude", "x_c", "y_c", "saturation"]
self.axis_names = ["$A$", r"$x_c$", r"$y_c$", "saturation"]
self.bounds = [[]]
self.p_default = np.array([1, 0, 0, 1])
self.max_half_width = np.inf
self.clip = clip
def evaluate(self, pixels, p=None): # pragma: no cover
if p is not None:
self.p = np.asarray(p).astype(float)
if pixels.ndim == 3 and pixels.shape[0] == 2:
return np.zeros_like(pixels)
elif pixels.ndim == 1:
return np.zeros_like(pixels)
else:
raise ValueError(f"Pixels array must have dimension 1 or shape=(2,Nx,Ny). Here pixels.ndim={pixels.shape}.")
def apply_max_width_to_bounds(self, max_half_width=None): # pragma: no cover
pass
def fit_psf(self, data, data_errors=None, bgd_model_func=None):
"""
Fit a PSF model on 1D or 2D data.
Parameters
----------
data: array_like
1D or 2D array containing the data.
data_errors: np.array, optional
The 1D or 2D array of uncertainties.
bgd_model_func: callable, optional
A 1D or 2D function to model the extracted background (default: None -> null background).
Returns
-------
fit_workspace: PSFFitWorkspace
The PSFFitWorkspace instance to get info about the fitting.
Examples
--------
Build a mock PSF2D without background and with random Poisson noise:
>>> p0 = np.array([200000, 20, 30, 5, 2, -0.1, 2, 400000])
>>> psf0 = MoffatGauss(p0)
>>> yy, xx = np.mgrid[:50, :60]
>>> data = psf0.evaluate(np.array([xx, yy]), p0)
>>> data = np.random.poisson(data)
>>> data_errors = np.sqrt(data+1)
Fit the data in 2D:
>>> p = np.array([150000, 19, 31, 4.5, 2.5, -0.1, 3, 400000])
>>> psf = MoffatGauss(p)
>>> w = psf.fit_psf(data, data_errors=data_errors, bgd_model_func=None)
>>> w.plot_fit()
.. doctest::
:hide:
>>> assert w.model is not None
>>> residuals = (w.data-w.model)/w.err
>>> assert w.costs[-1] / w.pixels.size < 1.3
>>> assert np.abs(np.mean(residuals)) < 0.4
>>> assert np.std(residuals) < 1.2
>>> assert np.all(np.isclose(psf.p[1:3], p0[1:3], atol=1e-1))
Fit the data in 1D:
>>> data1d = data[:,int(p0[1])]
>>> data1d_err = data_errors[:,int(p0[1])]
>>> p = np.array([10000, 20, 32, 4, 3, -0.1, 2, 400000])
>>> psf1d = MoffatGauss(p)
>>> w = psf1d.fit_psf(data1d, data_errors=data1d_err, bgd_model_func=None)
>>> w.plot_fit()
.. doctest::
:hide:
>>> assert w.model is not None
>>> residuals = (w.data-w.model)/w.err
>>> assert w.costs[-1] / w.pixels.size < 1.2
>>> assert np.abs(np.mean(residuals)) < 0.2
>>> assert np.std(residuals) < 1.2
>>> assert np.all(np.isclose(w.p[2], p0[2], atol=1e-1))
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from spectractor.extractor.psf import *
p = np.array([200000, 20, 30, 5, 2, -0.1, 2, 400000])
psf = MoffatGauss(p)
yy, xx = np.mgrid[:50, :60]
data = psf.evaluate(np.array([xx, yy]), p)
data = np.random.poisson(data)
data_errors = np.sqrt(data+1)
data = np.random.poisson(data)
data_errors = np.sqrt(data+1)
psf = MoffatGauss(p)
w = psf.fit_psf(data, data_errors=data_errors, bgd_model_func=None)
w.plot_fit()
"""
w = PSFFitWorkspace(self, data, data_errors, bgd_model_func=bgd_model_func,
verbose=False, live_fit=False)
run_minimisation(w, method="newton", ftol=1 / w.pixels.size, xtol=1e-6, niter=50, fix=w.fixed)
self.p = np.copy(w.p)
return w
class Moffat(PSF):
def __init__(self, p=None, clip=False):
PSF.__init__(self, clip=clip)
        self.p_default = np.array([1, 0, 0, 3, 2, 1])
# This initializes the problem class for SWE
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from parameters import Nx, Ny, Lx, Ly
from parameters import rho, grav, dt, dx, dy, ft
from parameters import K
from parameters import plot_viz, num_steps_per_plot, num_samples, num_train
# Common functions for spatial discretizations
def state_reconstruction(q,Nx,Ny):
# Weno5
pad = 3
qtemp = periodic_bc(q,pad)
# Smoothness indicators in x
beta_0 = 13.0/12.0*(qtemp[pad-2:pad+Nx-2,:]-2.0*qtemp[pad-1:pad+Nx-1,:]+qtemp[pad:Nx+pad,:])**2 \
+ 1.0/4.0*(qtemp[pad-2:pad+Nx-2,:]-4.0*qtemp[pad-1:pad+Nx-1,:]+3.0*qtemp[pad:Nx+pad,:])**2
beta_1 = 13.0/12.0*(qtemp[pad-1:pad+Nx-1,:]-2.0*qtemp[pad:pad+Nx,:]+qtemp[pad+1:Nx+pad+1,:])**2 \
+ 1.0/4.0*(qtemp[pad-1:pad+Nx-1,:]-qtemp[pad+1:pad+Nx+1,:])**2
beta_2 = 13.0/12.0*(qtemp[pad:pad+Nx,:]-2.0*qtemp[pad+1:pad+Nx+1,:]+qtemp[pad+2:Nx+pad+2,:])**2 \
+ 1.0/4.0*(3.0*qtemp[pad:pad+Nx,:]-4.0*qtemp[pad+1:pad+Nx+1,:]+qtemp[pad+2:Nx+pad+2,:])**2
# nonlinear weights in x
alpha_0 = (1.0/10.0)/((beta_0+1.0e-6)**2)
alpha_1 = (6.0/10.0)/((beta_1+1.0e-6)**2)
alpha_2 = (3.0/10.0)/((beta_2+1.0e-6)**2)
# Find nonlinear weights
w_0 = (alpha_0/(alpha_0+alpha_1+alpha_2))/6.0
w_1 = (alpha_1/(alpha_0+alpha_1+alpha_2))/6.0
w_2 = (alpha_2/(alpha_0+alpha_1+alpha_2))/6.0
# Find state reconstructions in x - wave to right (at i+1/2)
qxright = w_0*(2.0*qtemp[pad-2:pad+Nx-2,:]-7.0*qtemp[pad-1:pad+Nx-1,:]+11.0*qtemp[pad:pad+Nx,:]) \
+ w_1*(-qtemp[pad-1:pad+Nx-1,:]+5.0*qtemp[pad:pad+Nx,:]+2.0*qtemp[pad+1:pad+Nx+1,:]) \
+ w_2*(2.0*qtemp[pad:pad+Nx,:]+5.0*qtemp[pad+1:pad+Nx+1,:]-qtemp[pad+2:pad+Nx+2,:])
# Find state reconstructions in x - wave to left (at i+1/2)
qxleft = w_0*(2.0*qtemp[pad+2:pad+Nx+2,:]-7.0*qtemp[pad+1:pad+Nx+1,:]+11.0*qtemp[pad:pad+Nx,:]) \
+ w_1*(-qtemp[pad+1:pad+Nx+1,:]+5.0*qtemp[pad:pad+Nx,:]+2.0*qtemp[pad-1:pad+Nx-1,:]) \
+ w_2*(2.0*qtemp[pad:pad+Nx,:]+5.0*qtemp[pad-1:pad+Nx-1,:]-qtemp[pad-2:pad+Nx-2,:])
qxleft = qxleft[:,pad:pad+Ny]
qxright = qxright[:,pad:pad+Ny]
# Smoothness indicators in y
beta_0 = 13.0/12.0*(qtemp[:,pad-2:pad+Ny-2]-2.0*qtemp[:,pad-1:pad+Ny-1]+qtemp[:,pad:Ny+pad])**2 \
+ 1.0/4.0*(qtemp[:,pad-2:pad+Ny-2]-4.0*qtemp[:,pad-1:pad+Ny-1]+3.0*qtemp[:,pad:Ny+pad])**2
beta_1 = 13.0/12.0*(qtemp[:,pad-1:pad+Ny-1]-2.0*qtemp[:,pad:pad+Ny]+qtemp[:,pad+1:Ny+pad+1])**2 \
+ 1.0/4.0*(qtemp[:,pad-1:pad+Ny-1]-qtemp[:,pad+1:pad+Ny+1])**2
beta_2 = 13.0/12.0*(qtemp[:,pad:pad+Ny]-2.0*qtemp[:,pad+1:pad+Ny+1]+qtemp[:,pad+2:Ny+pad+2])**2 \
+ 1.0/4.0*(3.0*qtemp[:,pad:pad+Ny]-4.0*qtemp[:,pad+1:pad+Ny+1]+qtemp[:,pad+2:Ny+pad+2])**2
# nonlinear weights in x
alpha_0 = (1.0/10.0)/((beta_0+1.0e-6)**2)
alpha_1 = (6.0/10.0)/((beta_1+1.0e-6)**2)
alpha_2 = (3.0/10.0)/((beta_2+1.0e-6)**2)
# Find nonlinear weights
w_0 = (alpha_0/(alpha_0+alpha_1+alpha_2))/6.0
w_1 = (alpha_1/(alpha_0+alpha_1+alpha_2))/6.0
w_2 = (alpha_2/(alpha_0+alpha_1+alpha_2))/6.0
# Find state reconstructions in y - qright (at i+1/2)
qyright = w_0*(2.0*qtemp[:,pad-2:pad+Ny-2]-7.0*qtemp[:,pad-1:pad+Ny-1]+11.0*qtemp[:,pad:pad+Ny]) \
+ w_1*(-qtemp[:,pad-1:pad+Ny-1]+5.0*qtemp[:,pad:pad+Ny]+2.0*qtemp[:,pad+1:pad+Ny+1]) \
+ w_2*(2.0*qtemp[:,pad:pad+Ny]+5.0*qtemp[:,pad+1:pad+Ny+1]-qtemp[:,pad+2:pad+Ny+2])
# Find state reconstructions in y - wave to left (at i+1/2)
qyleft = w_0*(2.0*qtemp[:,pad+2:pad+Ny+2]-7.0*qtemp[:,pad+1:pad+Ny+1]+11.0*qtemp[:,pad:pad+Ny]) \
+ w_1*(-qtemp[:,pad+1:pad+Ny+1]+5.0*qtemp[:,pad:pad+Ny]+2.0*qtemp[:,pad-1:pad+Ny-1]) \
+ w_2*(2.0*qtemp[:,pad:pad+Ny]+5.0*qtemp[:,pad-1:pad+Ny-1]-qtemp[:,pad-2:pad+Ny-2])
qyleft = qyleft[pad:pad+Nx,:]
qyright = qyright[pad:pad+Nx,:]
return qxleft, qxright, qyleft, qyright
def reimann_solve(spec_rad,fl,fr,ql,qr,dim):
    # Rusanov Riemann solver
pad = 3
srt = periodic_bc(spec_rad,pad)
if dim == 'x':
srt = np.maximum.reduce([srt[pad-3:Nx+pad-3,pad:Ny+pad],srt[pad-2:Nx+pad-2,pad:Ny+pad],srt[pad-1:Nx+pad-1,pad:Ny+pad],\
srt[pad:Nx+pad,pad:Ny+pad],srt[pad+1:Nx+pad+1,pad:Ny+pad],srt[pad+2:Nx+pad+2,pad:Ny+pad],srt[pad+3:Nx+pad+3,pad:Ny+pad]])
flux = 0.5*(fr+fl) + 0.5*srt*(qr+ql)
return flux
else:
srt = np.maximum.reduce([srt[pad:Nx+pad,pad-3:Ny+pad-3],srt[pad:Nx+pad,pad-2:Ny+pad-2],srt[pad:Nx+pad,pad-1:Ny+pad-1],\
srt[pad:Nx+pad,pad:Ny+pad],srt[pad:Nx+pad,pad+1:Ny+pad+1],srt[pad:Nx+pad,pad+2:Ny+pad+2],srt[pad:Nx+pad,pad+3:Ny+pad+3]])
flux = 0.5*(fr+fl) + 0.5*srt*(qr+ql)
return flux
def periodic_bc(q,pad):
qtemp = np.zeros(shape=(q.shape[0]+2*pad,q.shape[1]+2*pad),dtype='double')
# Periodicity updates
qtemp[pad:Nx+pad,pad:Ny+pad] = q[:,:]
# x direction periodicity
    qtemp[0:pad,:] = qtemp[Nx:Nx+pad,:]
qtemp[Nx+pad:,:] = qtemp[pad:2*pad,:]
# y direction periodicity
    qtemp[:,0:pad] = qtemp[:,Ny:Ny+pad]
qtemp[:,Ny+pad:] = qtemp[:,pad:2*pad]
return qtemp
def spectral_radius(q1,q2):
sound_speed = 2.0*np.sqrt(q1/rho*grav)
u = q2/q1
return np.maximum.reduce([np.abs(u+sound_speed),np.abs(u-sound_speed),\
np.abs(sound_speed)])
def flux_reconstruction(q1,q2,q3):
spec_rad_x = spectral_radius(q1,q2)
spec_rad_y = spectral_radius(q1,q3)
q1xleft, q1xright, q1yleft, q1yright = state_reconstruction(q1,Nx,Ny)
q2xleft, q2xright, q2yleft, q2yright = state_reconstruction(q2,Nx,Ny)
q3xleft, q3xright, q3yleft, q3yright = state_reconstruction(q3,Nx,Ny)
# Reconstructing fluxes for q1
f1xleft = np.copy(q2xleft)
f1xright = np.copy(q2xright)
f1x = reimann_solve(spec_rad_x,f1xleft,f1xright,q1xleft,q1xright,'x')
f1yleft = np.copy(q3yleft)
f1yright = np.copy(q3yright)
f1y = reimann_solve(spec_rad_y,f1yleft,f1yright,q1yleft,q1yright,'y')
# Reconstructing fluxes for q2
f2xleft = (q2xleft**2)/(q1xleft) + 0.5*(q1xleft**2)*(grav/rho)
f2xright = (q2xright**2)/(q1xright) + 0.5*(q1xright**2)*(grav/rho)
    f2x = reimann_solve(spec_rad_x,f2xleft,f2xright,q2xleft,q2xright,'x')
f2yleft = (q2yleft*q3yleft/q1yleft)
f2yright = (q2yright*q3yright/q1yright)
f2y = reimann_solve(spec_rad_y,f2yleft,f2yright,q2yleft,q2yright,'y')
# Reconstructing fluxes for q3
f3xleft = (q2xleft*q3xleft/q1xleft)
f3xright = (q2xright*q3xright/q1xright)
f3x = reimann_solve(spec_rad_x,f3xleft,f3xright,q3xleft,q3xright,'x')
f3yleft = (q3yleft**2)/(q1yleft) + 0.5*(q1yleft**2)*(grav/rho)
f3yright = (q3yright**2)/(q1yright) + 0.5*(q1yright**2)*(grav/rho)
f3y = reimann_solve(spec_rad_y,f3yleft,f3yright,q3yleft,q3yright,'y')
return f1x, f1y, f2x, f2y, f3x, f3y
# Plotting functions
def plot_coefficients(Ytilde):
fig,ax = plt.subplots(nrows=1,ncols=4)
ax[0].plot(Ytilde[0,:],label='Mode 1')
ax[1].plot(Ytilde[1,:],label='Mode 2')
ax[2].plot(Ytilde[2,:],label='Mode 3')
ax[3].plot(Ytilde[3,:],label='Mode 4')
plt.legend()
plt.show()
def plot_fields_debug(X,Y,q,label,iter):
fig = plt.figure(figsize = (11, 7))
ax = Axes3D(fig)
surf = ax.plot_surface(X, Y, q, rstride = 1, cstride = 1,
cmap = plt.cm.jet, linewidth = 0, antialiased = True)
ax.set_title('Visualization', fontname = "serif", fontsize = 17)
ax.set_xlabel("x [m]", fontname = "serif", fontsize = 16)
ax.set_ylabel("y [m]", fontname = "serif", fontsize = 16)
if label == 'q1':
ax.set_zlim((0,2))
elif label == 'q2':
ax.set_zlim((-1,1))
else:
ax.set_zlim((-1,1))
plt.savefig(label+'_'+str(iter)+'.png')
# Shallow water equations class
class shallow_water(object):
"""docstring for ClassName"""
def __init__(self,args=[0,0]):
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
x = np.linspace(-self.Lx/2, self.Lx/2, self.Nx) # Array with x-points
y = np.linspace(-self.Ly/2, self.Ly/2, self.Ny) # Array with y-points
# Meshgrid for plotting
self.X, self.Y = np.meshgrid(x, y)
# Initialize fields
self.initialize(args)
# Field storage for viz
self.q_list = []
# Plot interval
self.plot_interval = num_steps_per_plot
# Field storage for ROM
self.snapshots_pod = [] # at plot interval
def initialize(self,args=[0,0]):
loc_x = args[0]
loc_y = args[1]
# There are three conserved quantities - initialize
self.q1 = 1.0+(rho*np.exp(-((self.X-loc_x)**2/(2*(0.05)**2) + (self.Y-loc_y)**2/(2*(0.05)**2))))
self.q2 = np.zeros(shape=(self.Nx,self.Ny),dtype='double')
self.q3 = np.zeros(shape=(self.Nx,self.Ny),dtype='double')
def right_hand_side(self,q1,q2,q3):
f1x, f1y, f2x, f2y, f3x, f3y = flux_reconstruction(q1,q2,q3) # these are all i+1/2
# Periodicity
pad = 1
f1xtemp = periodic_bc(f1x,pad)
f1ytemp = periodic_bc(f1y,pad)
f2xtemp = periodic_bc(f2x,pad)
f2ytemp = periodic_bc(f2y,pad)
f3xtemp = periodic_bc(f3x,pad)
f3ytemp = periodic_bc(f3y,pad)
r1 = 1.0/dx*(f1xtemp[pad:Nx+pad,pad:Ny+pad]-f1xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f1ytemp[pad:Nx+pad,pad:Ny+pad]-f1ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
r2 = 1.0/dx*(f2xtemp[pad:Nx+pad,pad:Ny+pad]-f2xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f2ytemp[pad:Nx+pad,pad:Ny+pad]-f2ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
r3 = 1.0/dx*(f3xtemp[pad:Nx+pad,pad:Ny+pad]-f3xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f3ytemp[pad:Nx+pad,pad:Ny+pad]-f3ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
return -r1, -r2, -r3
def integrate_rk(self):
# Equally spaced time integration
        q1temp = np.copy(self.q1)
import timeseries as ts
import numpy as np
import scipy
from scipy.stats import norm
from ._corr import stand, kernel_corr
#from ._corr import stand, kernel_corr
import asyncio
Breakpoints = {}
Breakpoints[2] = np.array([0.])
Breakpoints[4] = np.array([-0.67449,0,0.67449])
Breakpoints[8] = np.array([-1.1503,-0.67449,-0.31864,0,0.31864,0.67449,1.1503])
Breakpoints[16] = np.array([-1.5341,-1.1503,-0.88715,-0.67449,-0.48878,-0.31864,-0.15731,0,0.15731,0.31864,0.48878,0.67449,0.88715,1.1503,1.5341])
Breakpoints[32] = np.array([-1.8627,-1.5341,-1.318,-1.1503,-1.01,-0.88715,-0.77642,-0.67449,-0.57913,-0.48878,-0.40225,-0.31864,-0.2372,-0.15731,-0.078412,0,0.078412,0.15731,0.2372,0.31864,0.40225,0.48878,0.57913,0.67449,0.77642,0.88715,1.01,1.1503,1.318,1.5341,1.8627])
Breakpoints[64] = np.array([-2.1539,-1.8627,-1.6759,-1.5341,-1.4178,-1.318,-1.2299,-1.1503,-1.0775,-1.01,-0.94678,-0.88715,-0.83051,-0.77642,-0.72451,-0.67449,-0.6261,-0.57913,-0.53341,-0.48878,-0.4451,-0.40225,-0.36013,-0.31864,-0.27769,-0.2372,-0.1971,-0.15731,-0.11777,-0.078412,-0.039176,0,0.039176,0.078412,0.11777,0.15731,0.1971,0.2372,0.27769,0.31864,0.36013,0.40225,0.4451,0.48878,0.53341,0.57913,0.6261,0.67449,0.72451,0.77642,0.83051,0.88715,0.94678,1.01,1.0775,1.1503,1.2299,1.318,1.4178,1.5341,1.6759,1.8627,2.1539])
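# Precomputed standard-normal quantiles norm.ppf(i/a), i = 1..a-1, for each cardinality a;
# e.g. for a = 4: norm.ppf([0.25, 0.5, 0.75]) ~= [-0.67449, 0.0, 0.67449].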
# this function is directly used for augmented selects
def proc_main(pk, row, arg):
#your code here
argts = ts.TimeSeries(*arg)
series = stand(argts,argts.mean(),argts.std())
a = 4
w = 32
symbols = ['{0:b}'.format(i).zfill(int(np.log(a-1)/np.log(2))+1) for i in range(a)]
if a in Breakpoints:
breakpoints = Breakpoints[a]#norm.ppf(np.array([i/a for i in range(1,a)]))
else:
raise ValueError('Breakpoints do not exist for cardinality {}'.format(a))
    breakpoints = np.array([*breakpoints, np.inf])
# -*- coding: utf-8 -*-
import numpy as np
import torch as th
import leibniz as lbnz
from cached_property import cached_property
from torch import Tensor
from leibniz import cast
class RegularGrid:
def __init__(self, basis, L=2, W=2, H=2, east=1, west=-1, north=1, south=-1, upper=1, lower=-1):
self.basis = basis
self.L = L
self.W = W
self.H = H
self.shape = (1, 1, self.L, self.W, self.H)
self.east = east
self.west = west
self.north = north
self.south = south
self.upper = upper
self.lower = lower
self.default_device = -1
def get_device(self):
if self.default_device == -1:
return 'cpu'
return self.default_device
def set_device(self, ix):
self.default_device = ix
def mk_zero(self) -> Tensor:
return cast(np.zeros(self.shape), device=self.default_device)
@cached_property
def zero(self) -> Tensor:
return self.mk_zero()
def mk_one(self) -> Tensor:
        return cast(np.ones(self.shape), device=self.default_device)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
from glmatrix import *
import numpy as np
print("#########################################")
#np.set_printoptions(precision=3)
np.set_printoptions(formatter={'float': '{: 8.3f}'.format})
#np.set_printoptions(suppress=True)
location_v = vec3_create([5.0, 6.0, 7.0])
location_m = gl_mat4_from_translation(location_v)
print("Location Matrix")
print("")
#print(location_m)
transform_array = np.array(location_m, np.float32)
print(transform_array)
print("")
print("#########################################")
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_x_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - X")
print("")
transform_array = np.array(q_rot, np.float32)
#!/usr/bin/python
import numpy as np
# from Python lists
a1 = np.array([[1, 2, 3], [4, 5, 6]])
print(a1)
# array of zeros
a2 = np.zeros((2, 2))
print(a2)
# array of ones
a3 = np.ones((2, 2))  # Create an array of all ones
print(a3)  # Prints "[[1. 1.]\n [1. 1.]]"
# array of predefined values
a4 = np.full((2, 2), 5)
print(a4)
# identity matrix
a5 = np.eye(3)
print(a5)
# array of random values
a6 = np.random.random((3, 3))
print(a6)
# -*- coding: utf-8 -*-
import os
import sys
import h5py
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from presto.utils import rotate_opt
rcParams['figure.dpi'] = 108.8
if len(sys.argv) == 2:
load_filename = sys.argv[1]
print(f"Loading: {os.path.realpath(load_filename)}")
else:
load_filename = None
def load(load_filename):
with h5py.File(load_filename, "r") as h5f:
num_averages = h5f.attrs["num_averages"]
control_freq = h5f.attrs["control_freq"]
control_if = h5f.attrs["control_if"]
readout_freq = h5f.attrs["readout_freq"]
readout_duration = h5f.attrs["readout_duration"]
control_duration = h5f.attrs["control_duration"]
readout_amp = h5f.attrs["readout_amp"]
control_amp_90 = h5f.attrs["control_amp_90"]
control_amp_180 = h5f.attrs["control_amp_180"]
sample_duration = h5f.attrs["sample_duration"]
nr_delays = h5f.attrs["nr_delays"]
dt_delays = h5f.attrs["dt_delays"]
wait_delay = h5f.attrs["wait_delay"]
readout_sample_delay = h5f.attrs["readout_sample_delay"]
t_arr = h5f["t_arr"][()]
store_arr = h5f["store_arr"][()]
source_code = h5f["source_code"][()]
print(f"Control frequency: {control_freq / 1e9:.2f} GHz")
t_low = 1500 * 1e-9
t_high = 2000 * 1e-9
t_span = t_high - t_low
idx_low = np.argmin(np.abs(t_arr - t_low))
idx_high = np.argmin(np.abs(t_arr - t_high))
idx = np.arange(idx_low, idx_high)
nr_samples = len(idx)
# Plot raw store data for first iteration as a check
fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)
ax11, ax12 = ax1
ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
ax11.plot(1e9 * t_arr, np.abs(store_arr[0, 0, :]))
ax12.plot(1e9 * t_arr, np.angle(store_arr[0, 0, :]))
ax12.set_xlabel("Time [ns]")
fig1.show()
# Analyze T2
resp_arr = np.mean(store_arr[:, 0, idx], axis=-1)
data = rotate_opt(resp_arr)
delay_arr = dt_delays * np.arange(nr_delays)
# Fit data to I quadrature
try:
popt, perr = fit_simple(delay_arr, np.real(data))
T2 = popt[0]
T2_err = perr[0]
print("T2_echo time: {} +- {} us".format(1e6 * T2, 1e6 * T2_err))
# det = popt[3]
# det_err = perr[3]
# print("detuning: {} +- {} Hz".format(det, det_err))
# sign = 1.0 if np.abs(popt[4]) < np.pi / 2 else -1.0
# print(f"sign: {sign}")
# i_at_e = popt[0] + sign * popt[1]
# i_at_g = popt[0] - sign * popt[1]
# print(f"|e>: {i_at_e} rad")
# print(f"|g>: {i_at_g} rad")
success = True
except Exception:
print("Unable to fit data!")
success = False
fig2, ax2 = plt.subplots(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)
ax21, ax22, ax23, ax24 = ax2
ax21.plot(1e6 * delay_arr, np.abs(data))
ax22.plot(1e6 * delay_arr, np.unwrap(np.angle(data)))
ax23.plot(1e6 * delay_arr, np.real(data))
if success:
ax23.plot(1e6 * delay_arr, decay(delay_arr, *popt), '--')
ax24.plot(1e6 * delay_arr, np.imag(data))
ax21.set_ylabel("Amplitude [FS]")
ax22.set_ylabel("Phase [rad]")
ax23.set_ylabel("I [FS]")
ax24.set_ylabel("Q [FS]")
ax2[-1].set_xlabel("Ramsey delay [us]")
fig2.show()
data_max = np.abs(data.real).max()
unit = ""
mult = 1.0
if data_max < 1e-6:
unit = "n"
mult = 1e9
elif data_max < 1e-3:
unit = "μ"
mult = 1e6
elif data_max < 1e0:
unit = "m"
mult = 1e3
fig3, ax3 = plt.subplots(tight_layout=True)
ax3.plot(1e6 * delay_arr, mult * np.real(data), '.')
ax3.set_ylabel(f"I quadrature [{unit:s}FS]")
ax3.set_xlabel("Ramsey delay [μs]")
if success:
ax3.plot(1e6 * delay_arr, mult * decay(delay_arr, *popt), '--')
ax3.set_title(f"T2 echo = {1e6*T2:.0f} ± {1e6*T2_err:.0f} μs")
fig3.show()
return fig1, fig2, fig3
def decay(t, *p):
T, xe, xg = p
    return xg + (xe - xg) * np.exp(-t / T)
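def fit_simple(t, x):
    # `fit_simple` is called in load() above but is not defined in this excerpt.
    # This is a minimal sketch, assuming a least-squares fit of the `decay` model
    # with an ad-hoc initial guess (T, xe, xg).
    p0 = (0.5 * (t[0] + t[-1]), x[0], x[-1])
    popt, pcov = curve_fit(decay, t, x, p0=p0)
    perr = np.sqrt(np.diag(pcov))
    return popt, perr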
from __future__ import print_function
import itertools
from snap import viewer
from snap.math import *
from snap.gl import *
import numpy as np
class Box(object):
def __init__(self):
self.size = np.ones(3)
self.frame = Rigid3()
def support(self, d):
local = self.frame.orient.inv()(d)
sign = (local > 0) - (local < 0)
return self.frame(sign * self.size / 2)
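# Example: for a unit box in the identity frame, the support point in direction
# d = [1, -1, 1] is the corner [0.5, -0.5, 0.5].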
def barycentric_coordinates(S, p):
m, n = S.shape
A = np.zeros( (n + 1, m))
A[:n] = S.T
A[-1] = 1
b = np.zeros(m)
b[:n] = p
b[-1] = 1
x = np.linalg.solve(A, b)
return x
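# Example: for the unit triangle S = [[0, 0], [1, 0], [0, 1]] and p at the centroid
# (1/3, 1/3), the solve returns the barycentric weights [1/3, 1/3, 1/3].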
def pgs(M, q):
n = q.size
x = np.zeros(n)
while True:
for i in xrange(n):
x[i] -= (M[i].dot(x) + q[i]) / M[i, i]
x[i] = max(x[i], 0)
yield x
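# Usage sketch (illustrative LCP): iterate a fixed number of sweeps and keep the last x.
#   M, q = np.diag([2.0, 2.0]), np.array([-2.0, 1.0])
#   for x, _ in zip(pgs(M, q), range(50)): pass    # x -> [1, 0], and M x + q = [0, 1] >= 0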
def pgs_ls(A, b, p, omega = 1.0):
'''M, q = A.dot(A.T), A.dot(p) - b'''
m, n = A.shape
x = np.zeros(m)
# net = p + A.T.dot(x)
net = np.copy(p)
while True:
for i in xrange(m):
old = x[i]
x[i] -= omega * (A[i].dot(net) - b[i]) / A[i].dot(A[i])
x[i] = max(x[i], 0.0)
# net += A[i] * ( x[i] - old )
net[:] = p + A.T.dot(x)
yield x, net
def pocs(A, b, p):
m, n = A.shape
I = np.zeros( (m, n) )
q = np.copy(p)
def proj(i, x):
y = b[i] - A[i].dot(x)
if y <= 0: return x
res = x + A[i] * (y / (A[i].dot(A[i])))
return res
n = 0
while True:
i = n % m
p = q - I[i]
q[:] = proj(i, p)
I[i] = q - p
yield q
n += 1
def proj_splx(x):
'''O(n log(n) )'''
n = x.size
y = sorted(x)
s = 0
for k in range(n):
i = n - 1 - k
s += y[i]
t = (s - 1.0) / float(n - i)
if t > y[i - 1]:
break
return np.maximum(x - t, 0)
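# Example: proj_splx(np.array([0.5, 1.5])) -> [0., 1.], the Euclidean projection of
# (0.5, 1.5) onto the probability simplex {x >= 0, sum(x) = 1}.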
def proj_splx_alt(x, q):
'''O(n log(n) )'''
n = x.size
# sort
p = sorted(range(n), key=lambda k: x[k] / q[k])
s = 0
z = 0
for k in range(n):
i = n - 1 - k
s += x[p[i]]
z += q[p[i]]
t = (s - 1.0) / z
if t > x[ p[i - 1] ] / q[ p[i - 1] ]:
break
return np.maximum(x - q * t, 0)
def project_simplex(S, p):
m, n = S.shape
assert n + 1 == m
Q = np.zeros( (m, m) )
Q[:n] = S.T
Q[-1] = 1
Qinv = np.linalg.inv(Q)
A = Qinv[:, :n]
b = - Qinv[:, -1]
res = []
for it, (x, net) in itertools.izip(xrange(n * n), pgs_ls(A, b, p) ):
res.append(np.copy(net))
return res
for it, x in itertools.izip(xrange(2 * n * n), pocs(A, b, p)):
res.append(np.copy(x))
return res
def project_simplex(S, p):
m, n = S.shape
A = S.dot(S.T)
b = S.dot(p)
    x = np.ones(m)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
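# Hedged end-to-end sketch (stream name, regex tag and dates are illustrative only):
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, '.*METBK.*\\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)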
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
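# Usage sketch (added note, not part of the original lookup tables): METBK wind
# is exposed as corrected east/north components. Assuming the data arrays have
# been populated after the request, scalar speed and the meteorological "from"
# direction could be derived as follows (the names below are hypothetical):
#   u = var_list[4].data                      # eastward wind, m/s
#   v = var_list[5].data                      # northward wind, m/s
#   wind_speed = np.hypot(u, v)               # magnitude, m/s
#   wind_from = np.degrees(np.arctan2(-u, -v)) % 360.0  # direction wind blows from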
# FLORT: three-channel fluorometer (chlorophyll-a, CDOM, optical backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
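# Note (added): FLORT chlorophyll-a occasionally fits to small negative values;
# a minimal, hypothetical screen once var_list[2].data has been populated:
#   chl = var_list[2].data                    # ug/L
#   chl = np.where(chl < 0.0, np.nan, chl)    # mask negative fits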
# FDCHP: direct-covariance air-sea flux package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# DOSTA: dissolved oxygen optode
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
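# Note (added): DOSTA streams report oxygen both per mass (umol/kg) and per
# volume (umol/L); converting between the two needs in-situ density. A rough
# sketch with a nominal density (a proper conversion would use TEOS-10):
#   o2_per_kg = var_list[1].data              # umol/kg
#   rho = 1025.0                              # kg/m^3, hypothetical nominal density
#   o2_per_litre = o2_per_kg * rho / 1000.0   # umol/L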
# ADCP: acoustic Doppler current profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
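# Note (added): the ADCP attitude channels above are stored in deci-degrees;
# a minimal conversion sketch once the arrays are populated:
#   heading_deg = var_list[2].data / 10.0   # deci-degrees -> degrees
#   pitch_deg   = var_list[3].data / 10.0
#   roll_deg    = var_list[4].data / 10.0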
# ZPLSC: bio-acoustic sonar (zooplankton profiler)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
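# Note (added): every stream's time axis is seconds since 1900-01-01 (the NTP
# epoch). A sketch converting it to numpy datetimes once populated:
#   t = var_list[0].data                                            # float seconds
#   t64 = np.datetime64('1900-01-01') + (t * 1e6).astype('timedelta64[us]')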
# WAVSS: surface wave spectra statistics
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
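# Note (added): WAVSS carries two significant-wave-height estimates -- the
# zero-crossing statistic (var_list[5]) and the spectral Hm0 = 4*sqrt(m0)
# (var_list[12]); they should track each other closely. Hypothetical check:
#   hs_zc = var_list[5].data     # zero-crossing Hs, m
#   hs_m0 = var_list[12].data    # spectral Hm0, m
#   bias = np.nanmean(hs_m0 - hs_zc)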
# VELPT: single-point velocity meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
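# Note (added): the VELPT channels above are scaled integers, as the units
# strings indicate; a minimal conversion sketch to physical units:
#   temp_degC   = var_list[7].data / 100.0    # 0.01 degC counts -> degC
#   press_dbar  = var_list[8].data / 1000.0   # 0.001 dbar counts -> dbar
#   heading_deg = var_list[4].data / 10.0     # deci-degrees -> degrees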
# PCO2W: partial pressure of CO2 in seawater
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
# PHSEN: seawater pH sensor
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR: multispectral downwelling irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF: seafloor pressure (tide measurement)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP: pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
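# Hypothetical usage sketch, assuming this chain lives inside a lookup
# function that returns the resolved stream path and variable list
# (get_uframe_dataset is an illustrative name, not necessarily the real one):
#   name, variables = get_uframe_dataset('CE01ISSM', 'NSIF', 'CTD', 'Telemetered')
#   for v in variables:
#       print(v.name, v.units)  # e.g. 'temp' 'degC', 'pressure' 'dbar', ...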
#VEL3D: 3-D single-point velocity meter (seafloor)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
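# Note: despite the '_mbar' suffix on var_list[4].name, the declared unit
# is 0.001 dbar; taken at face value, multiplying the stored values by
# 0.001 yields dbar (1 mbar is actually 0.01 dbar, so the name and the
# unit disagree by a factor of ten and the scale should be verified).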
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK: 3-D single-point velocity meter (and profiler CTD) on the wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
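# Heading, pitch, and roll above are declared in 'ddegrees', which
# presumably denotes deci-degrees (tenths of a degree); if so:
#   heading_deg = var_list[4].data / 10.0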
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A: air-sea pCO2
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD: photosynthetically available radiation
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
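# The doubled underscore in 'parad_k__stc_imodem_instrument' appears to
# match the stream name as registered in uFrame and is not a typo here.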
#OPTAA: optical absorption and attenuation
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
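# The OPTAA branches define only the time axis: the spectral absorption
# and attenuation channels are multi-dimensional and are presumably
# requested and handled outside this flat var_list scheme.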
#NUTNR: nitrate (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
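# Note: the stream name 'suna_dcl_recovered' is used for the Telemetered
# method as well; the 'recovered' suffix reflects how the stream appears
# to be registered in uFrame, not that the data are recovered.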
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## RecoveredHost data streams
#MOPAK: 3-axis motion pack
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK: bulk meteorology package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
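# Wind speed can be derived from the magnetic-corrected component pair
# (a sketch; var_list[4]/var_list[5] hold the east/north components):
#   wind_speed = np.sqrt(var_list[4].data**2 + var_list[5].data**2)  # m/s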
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT: three-wavelength fluorometer (chlorophyll a, CDOM, backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP: direct covariance flux package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA: dissolved oxygen (optode)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
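# Per OOI conventions, 'dissolved_oxygen' (umol/kg) is the salinity- and
# pressure-corrected L2 product, while 'estimated_oxygen_concentration'
# (umol/L) is the optode's uncorrected concentration estimate.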
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP: acoustic Doppler current profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
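# For the ADCP streams, 'bin_depths' gives the depth of each velocity
# bin, so the velocity variables are two-dimensional (time x bin);
# heading/pitch/roll are in deci-degrees (degrees = value / 10.0).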
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
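#WAVSS = Surface Wave Spectra sensor: bulk wave statistics such as
#significant wave height and period, mean and peak periods, and mean
#wave direction/spread.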
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
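#VELPT = single-point velocity meter: point velocities in m/s plus
#attitude (deci-degrees), temperature (0.01 degC counts), and pressure
#as reported by the instrument.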
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
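#PCO2W = partial pressure of CO2 in seawater (in-water sensor):
#thermistor temperature in degC and seawater pCO2 in uatm.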
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
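#PHSEN = seawater pH sensor: thermistor temperature in degC and a
#dimensionless seawater pH value.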
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
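#SPKIR = spectral irradiance sensor: downwelling irradiance vector in
#uW cm-2 nm-1.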
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
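#PRESF = seafloor pressure sensor: absolute seafloor pressure (dbar)
#and seawater temperature (degC) from the tide-measurement stream.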
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
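#CTDBP = pumped CTD: temperature, practical salinity, density,
#pressure, and conductivity.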
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
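#VEL3D = 3-D single-point velocity meter: east/north/up turbulent
#velocity components (m/s) plus seawater pressure.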
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
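#PCO2A = air-sea pCO2 sensor: partial pressure of CO2 in surface
#seawater and in the atmosphere (uatm), plus the derived CO2 flux.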
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
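#OPTAA = optical absorption/attenuation meter. Only the time base is
#requested in these branches, presumably because the spectral channels
#are multi-dimensional arrays that do not fit this flat var_list layout.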
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
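#NUTNR = nitrate sensor (SUNA, per the stream path): raw and
#salinity-corrected nitrate concentrations in umol/L.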
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
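#CTD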
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
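#ADCP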
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
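#ZPLSC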
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
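#VELPT
# VELPT streams report heading/pitch/roll in deci-degrees, temperature in
# centidegrees (0.01degC), and pressure in units of 0.001 dbar.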
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
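#VEL3D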
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
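#PRESF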
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
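#PHSEN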
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
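#PCO2W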
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
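#PARAD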
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
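#NUTNR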
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
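#FDCHP
# FDCHP (direct covariance flux) entries map only the time coordinate here.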
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
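#FLORT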
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
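# DOSTA dissolved-oxygen data below, served from the CTDBP-hosted instrument streams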
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
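# Wave statistics below come from the ADCPT-M log9 stream rather than a dedicated WAVSS instrument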
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
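# Cabled benthic experiment package (BEP) streamed data below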
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
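# Streamed OPTAA records below expose only the time axis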
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (coastal surface-piercing profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
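# CSPP OPTAA records below expose only time and CTD pressure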
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
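# Coastal glider (CE05MOAS mobile assets) data below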
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
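# DOSTA (dissolved oxygen) datasets for the CE05MOAS gliders: each branch
# exposes the optode oxygen concentration (umol/L), absolute oxygen
# (umol/kg), and interpolated CTD pressure alongside position.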
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
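# FLORT (FLBBCD fluorometer) datasets: chlorophyll (ug/L), CDOM (ppb),
# volume scattering (m-1 sr-1), and derived seawater scattering /
# optical backscatter coefficients (m-1), plus interpolated CTD
# pressure and position.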
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
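# ---- CE05MOAS glider PARAD branches (parad_m_glider streams) ----
# Each PARAD branch fills a 5-variable record: time, downwelling PAR
# (parad_m_par, umol photons m-2 s-1), the interpolated CTD pressure, and
# the glider position (lat/lon).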
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
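# ---- CE05MOAS glider ADCP branches (adcp_velocity_glider, RecoveredHost only) ----
# Every ADCP branch below is identical except for the glider number in the
# reference designator, so the group could in principle be generated rather
# than enumerated. A minimal sketch, kept in a comment so the elif chain stays
# intact (the helper name and tuple are illustrative, not part of this
# module's API):
#
#     ADCP_GLIDERS = ('386', '384', '383', '382', '381', '327',
#                     '326', '320', '319', '312', '311', '247')
#
#     def adcp_dataset_name(glider):
#         # e.g. adcp_dataset_name('386') ->
#         # 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
#         return ('CE05MOAS/GL{}/03-ADCPAM000/recovered_host/'
#                 'adcp_velocity_glider'.format(glider))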
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
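# ---- Surface-mooring METBK hourly flux branches (metbk_hourly) ----
# Note that the hourly bulk-flux stream uses 'met_timeflx' rather than 'time'
# as its time coordinate (var_list[0]); the remaining 16 variables are the
# hourly bulk-flux products (heat, momentum, freshwater fluxes, and the
# derived 2-m/10-m meteorological fields).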
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
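# ---- Surface-mooring WAVSS mean directional wave branches ----
# (wavss_a_dcl_mean_directional / *_recovered streams, 12 variables per record)
# As throughout this dispatch table, uframe_dataset_name encodes the request
# path as site/node/sensor/method/stream. A hedged sketch of how such a path
# could be mapped onto an OOI M2M sensor request URL (the splitting helper is
# hypothetical; the base endpoint is the public OOINet M2M API):
#
#     def m2m_url(uframe_dataset_name):
#         site, node, sensor, method, stream = uframe_dataset_name.split('/')
#         return ('https://ooinet.oceanobservatories.org/api/m2m/12576/'
#                 'sensor/inv/{}/{}/{}/{}/{}'.format(site, node, sensor,
#                                                    method, stream))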
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
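#WAVSS Non-Directional Wave Spectra Streams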
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
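#WAVSS Buoy Motion Streams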
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
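#WAVSS Directional Fourier Coefficient Streams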
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
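#Coastal Endurance Cabled Profiler (CE04OSPS/CE04OSPD) Data Streams
# Note: each branch in this chain maps one (platform_name, node,
# instrument_class, method) tuple to a single uFrame stream path and
# pre-allocates empty name/data/units slots, presumably filled in once the
# M2M request returns. A minimal caller sketch (the lookup function name is
# hypothetical, not defined in this module):
#   vlist = select_stream('CE04OSPS', 'PROFILER', 'CTD', 'Streamed')
#   vlist[1].name   # 'seawater_temperature'
#   vlist[1].units  # 'degC'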
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
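#Coastal Endurance Cabled 200m Platform (CE04OSPS/PC01B) Data Streams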
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
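# WAVSS product families follow: Stats, MeanDir, NonDir, Motion, and Fourier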
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A - Telemetered
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A - RecoveredHost
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
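# FDCHP streams expose only the time coordinate in this lookup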
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
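#METBK-hr (hourly bulk flux products)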
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
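#CTD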
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
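#OPTAA (time coordinate only in this lookup)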
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
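#VELPT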
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
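# FLORT: fluorometers on the NSIF (chlorophyll-a, CDOM, scattering, and optical backscatter)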
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
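# SPKIR: spectral irradiance sensors on the NSIF, reporting the downwelling irradiance vector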
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
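# DOSTA: dissolved oxygen optodes on the NSIF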
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
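# PHSEN: seawater pH sensors, first on the NSIF and then on the seafloor multi-function node (MFN)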
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
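# PCO2W: partial pressure of CO2 in seawater, measured on the MFN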
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
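# PRESF: seafloor pressure (tide) recorders on the MFN; note that CP04OSSM carries the
# 02-PRESFC000 series rather than the 02-PRESFB000 used on CP01CNSM and CP03ISSM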
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
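# VELPT: single-point velocity meters on the MFN; note that CP04OSSM carries the
# 04-VELPTB000 series rather than the 04-VELPTA000 used on CP01CNSM and CP03ISSM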
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
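# DOSTA: dissolved oxygen optodes on the MFN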
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
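# ZPLSC: bio-acoustic sonar on the MFN; only the time coordinate is populated for these streams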
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
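# ADCP: bottom-mounted acoustic Doppler current profilers on the MFN,
# reporting earth-referenced velocity profiles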
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP) data streams
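#The WFP platform entries below share a common pattern: a buoy-mounted MOPAK
#accelerometer stream (Telemetered and RecoveredHost) plus WFP01 profiler
#streams (FLORT, DOFST-K oxygen, VEL3D-K velocity, CTDPF-CKL, and PARAD-K),
#each available via the Telemetered and RecoveredWFP methods.
#CP04OSPM Wire-Following Profiler data streams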
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
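#CP01CNPM Wire-Following Profiler data streams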
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
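#CP02PMCI Wire-Following Profiler data streams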
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
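#CP02PMCO Wire-Following Profiler data streams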
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
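#CP02PMUI Wire-Following Profiler data streams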
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
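#CP02PMUO Wire-Following Profiler data streams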
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
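#CP03ISPM Wire-Following Profiler data streams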
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
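# Illustrative sketch (assumed, not part of the original catalog): each branch
# above only declares variable names, empty data arrays, and units. A separate,
# hypothetical step would populate var_list from a fetched uframe response
# given as a dict mapping variable name -> sequence of values.
def _fill_var_list(var_list, fetched):
    for var in var_list:
        if var.name in fetched:
            var.data = np.asarray(fetched[var.name])  # 'fetched' is hypothetical
    return var_list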
import sys
homeCodePath = r"H:\10_Python\005_Scripts_from_others\Laurent\wicking_pnm"
if homeCodePath not in sys.path:
sys.path.append(homeCodePath)
import random
import xarray as xr
import numpy as np
import scipy as sp
import networkx as nx
import time
from collections import deque
from skimage.morphology import cube
from scipy.interpolate import interp1d
from statsmodels.distributions.empirical_distribution import ECDF
from joblib import Parallel, delayed
from os import path
import wickingpnm.waitingtimes as waitingtimes
time_limit = {'T3_100_10_III': 344,
'T3_300_5': 229,
'T3_100_7': 206,
'T3_100_10': 232}
def label_function(struct, pore_object, label, labels, verbose = False):
mask = pore_object == label
connections = deque()
if verbose:
print('Searching around {}'.format(label))
mask = sp.ndimage.binary_dilation(input = mask, structure = struct(3))
neighbors = np.unique(pore_object[mask])[1:]
for nb in neighbors:
if nb != label:
if nb in labels:
conn = (label, nb)
if verbose:
print('\t{} connects to {}'.format(conn[1], conn[0]))
connections.append(conn)
return connections
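# Usage sketch (assumed): find the regions touching one labelled pore, e.g.
#   connections = label_function(cube, label_matrix, label=42,
#                                labels=np.unique(label_matrix)[1:], verbose=True)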
class PNM:
def __init__(self,
graph = None,
data_path = None,
sample = None,
inlets = [],
inlet_count = 5,
R_inlet = 1E17,
job_count = 4,
rand_exp_data = False,
rand_pore_props = False,
rand_waiting_times = False,
verbose = False,
seed = int(time.time()),
randomize_pore_data = False
):
self.job_count = job_count
self.verbose = verbose
self.data_path = data_path # Head directory for the data
self.sample = sample # Sample name
self.seed = seed
self.randomize_pore_data = randomize_pore_data
dyn_data_dir, pore_props_dir, pore_diff_dir = \
'dyn_data', 'pore_props', 'pore_diffs'
# dyn_data_dir, pore_props_dir, pore_diff_dir = '', '', ''
# self.exp_data_path = path.join(data_path, dyn_data_dir, 'dyn_data_' + sample + '.nc')
# self.pore_props_path = path.join(data_path, pore_props_dir, 'pore_props_' + sample + '.nc')
# self.pore_diff_path = path.join(data_path, pore_diff_dir, 'peak_diff_data_' + sample + '.nc')
self.exp_data_path = path.join(data_path, 'dyn_data_' + sample + '.nc')
self.pore_props_path = path.join(data_path, 'pore_props_' + sample + '.nc')
# self.pore_diff_path = path.join(data_path, 'peak_diff_data_' + sample + '.nc')
drive = r'\\192.168.3.11\data118'
diff_data_path = path.join(drive, 'Robert_TOMCAT_3_netcdf4_archives', 'processed_1200_dry_seg_aniso_sep')
self.pore_diff_path = path.join(diff_data_path, 'peak_diff_data_' + sample + '.nc')
self.randomize_waiting_times = rand_waiting_times
self.pore_diff_data = None
if path.isfile(self.pore_diff_path) and not rand_waiting_times:
self.pore_diff_data = waitingtimes.get_diff_data(self.pore_diff_path)
self.graph = graph
self.data = None
self.waiting_times = np.array([])
self.V = None # Water volume in the network
self.filled = None # filled nodes
self.inlets = inlets # inlet pores
self.R0 = None # pore resistances
self.R_full = None # resistances when full
self.R_inlet = R_inlet # inlet resistance
self.radi = None
self.heights = None
# self.volumes = None
if path.isfile(self.exp_data_path) and not rand_exp_data:
print('Reading the experimental dataset at {}'.format(self.exp_data_path))
self.data = xr.load_dataset(self.exp_data_path)
self.generate_graph(self.data)
self.nodes = {} # Maps integer index -> graph node label (TODO: verify the mapping where it is used; a list of the original labels is needed)
self.label_dict = {} # Dictionary translating graph nodes to labels
i = 0
for node in self.graph.nodes():
self.nodes[i] = node
self.label_dict[node] = i
i += 1
# self.labels contains the list of unique identifiers for the nodes
self.labels = np.arange(len(self.nodes))
if path.isfile(self.pore_props_path) and not rand_pore_props:
print('Reading the pore dataset at {}'.format(self.pore_props_path))
pore_data = xr.load_dataset(self.pore_props_path)
self.generate_pore_data(pore_data)
else:
self.generate_pore_data()
self.generate_waiting_times()
self.build_inlets(inlet_count)
def extract_throat_list(self, label_matrix, labels):
"""
Inspired by <NAME>'s GETNET.
Extracts a list of directed throats connecting pores i -> j, including a few
throat parameters; the undirected network i - j has to be built in a second step.
"""
def extend_bounding_box(s, shape, pad=3):
a = deque()
for i, dim in zip(s, shape):
start = 0
stop = dim
if i.start - pad >= 0:
start = i.start - pad
if i.stop + pad < dim:
stop = i.stop + pad
a.append(slice(start, stop, None))
return tuple(a)
im = label_matrix
struct = cube # ball does not work as you would think (anisotropic expansion)
# if im.ndim == 2:
# struct = disk
crude_pores = sp.ndimage.find_objects(im)
# throw out None-entries (counterintuitive behavior of find_objects)
pores = deque()
bounding_boxes = deque()
for pore in crude_pores:
    if pore is None:
        continue
    bb = extend_bounding_box(pore, im.shape)
    if len(np.unique(im[bb])) > 2:  # keep pores that touch at least one other label
        pores.append(pore)
        bounding_boxes.append(bb)
connections_raw = Parallel(n_jobs = self.job_count)(
delayed(label_function)\
(struct, im[bounding_box], label, labels, self.verbose) \
for (bounding_box, label) in zip(bounding_boxes, labels)
)
# clear out empty objects
connections = deque()
for connection in connections_raw:
if len(connection) > 0:
connections.append(connection)
return np.concatenate(connections, axis = 0)
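# Illustrative helper (assumed, not in the original code): the extracted throat
# list is directed, so i -> j and j -> i may both appear; the undirected edge
# set mentioned in the docstring can be built by orienting and deduplicating.
def to_undirected_throats(self, throats):
    pairs = np.sort(throats[:, :2], axis=1)  # orient each pair so that i <= j
    return np.unique(pairs, axis=0)  # drop duplicate (i, j) rows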
def generate_graph(self, exp_data):
label_matrix = exp_data['label_matrix'].data
labels = exp_data['label'].data
# clean up the label matrix for spurious pixels (at the fiber surface, caused by an uncorrectable image shift)
# raw_labels = np.unique(label_matrix)
# for label in raw_labels[1:]:
# if not label in labels:
# label_matrix[np.where(label_matrix==label)] = 0
if self.verbose:
print('labels', labels)
print('label matrix shape', label_matrix.shape)
if self.graph is None:
print('Generating the pore network graph from the experimental dataset')
throats = self.extract_throat_list(label_matrix, labels)
self.graph = nx.Graph()
self.graph.add_edges_from(np.uint16(throats[:,:2]))
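# keep only the largest connected component, so every pore in the
# simulated network can actually be reached by the invading water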
Gcc = sorted(nx.connected_components(self.graph), key=len, reverse=True)
self.graph = self.graph.subgraph(Gcc[0])
def generate_pore_data(self, pore_data = None):
if pore_data is None:
if self.verbose:
print('Filling the graph with random pore data')
size = self.labels.max() + 1
re = self.radi = np.random.rand(size)
h0e = self.heights = np.random.rand(size)
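# note: np.random.randint(low, high) excludes high, so the exponent is always
# 5 for radii and 4 for heights, i.e. radii ~ 1e-5 m and heights ~ 1e-4 m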
for i in range(size):
re[i] /= 10**np.random.randint(5, 6)
h0e[i] /= 10**np.random.randint(4, 5)
return
print('Using experimental pore data')
px = pore_data.attrs['voxel']
vx = px**3
tmax = -1
if self.sample in list(time_limit.keys()):
tmax = time_limit[self.sample]
relevant_labels = list(self.label_dict.keys())
radi = self.radi = px*np.sqrt(pore_data['value_properties'].sel(property = 'median_area').data/np.pi)
heights = self.heights = px*pore_data['value_properties'].sel(property = 'major_axis').data
# volumes = self.volumes = vx*pore_data['value_properties'].sel(property = 'volume', label = relevant_labels).data
volumes = vx*self.data['volume'][:, tmax-10:tmax-1].median(dim='time').data
volumes[volumes==0] = np.median(volumes[volumes>0])
self.volumes = volumes
size = self.labels.max() + 1
if self.data is None or self.randomize_pore_data == True:
# corr = exp_data['sig_fit_data'].sel(sig_fit_var = 'alpha [vx]')/pore_data['value_properties'].sel(property = 'volume', label = exp_data['label'])
# pore_data['value_properties'].sel(property = 'median_area', label = exp_data['label']) = 1/corr*pore_data['value_properties'].sel(property = 'median_area', label = exp_data['label'])
# pore_data['value_properties'].sel(property = 'major_axis', label = exp_data['label']) = corr
print('Initializing pore props from ECDF distribution')
# all pores, even those outside the network (isolated nodes), can be used as the basis for the statistical distribution here
radi = self.radi = px*np.sqrt(pore_data['value_properties'].sel(property = 'median_area').data/np.pi)
heights = self.heights = px*pore_data['value_properties'].sel(property = 'major_axis').data
size = self.labels.max() + 1
# TODO: couple radii and heights because they correlate slightly; currently the resulting pore volumes are too small
# or mix the distribution functions of height, radius and volume -- something to think about ... for later ...
ecdf_radi, ecdf_heights, ecdf_volumes = ECDF(radi), ECDF(heights), ECDF(volumes)
seed = self.seed
prngpore = np.random.RandomState(seed)
prngpore2 = np.random.RandomState(seed*7+117)
random_input1 = lambda size: prngpore.rand(size)
random_input2 = lambda size: prngpore2.rand(size)
# factored_input = lambda size, factor: factor*np.ones(size) # factored_input(size, 0.7)
radi = self.radi = interp1d(ecdf_radi.y[1:], ecdf_radi.x[1:], fill_value = 'extrapolate')(random_input1(size))
self.heights = interp1d(ecdf_heights.y[1:], ecdf_heights.x[1:], fill_value = 'extrapolate')(random_input2(size))
volumes = self.volumes = interp1d(ecdf_volumes.y[1:], ecdf_volumes.x[1:], fill_value = 'extrapolate')(random_input2(size))
# self.heights = volumes/np.pi/radi**2
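# Sketch of the inverse-ECDF sampling used above (assumed helper, not part of
# the original code): draw u ~ U(0, 1) and map it through the empirical
# quantile function so that the samples reproduce the measured distribution.
def sample_from_ecdf(self, values, size, prng):
    ecdf = ECDF(values)
    quantile = interp1d(ecdf.y[1:], ecdf.x[1:], fill_value = 'extrapolate')
    return quantile(prng.rand(size))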
def generate_waiting_times(self):
size = self.labels.max() + 1
data = self.pore_diff_data
if self.randomize_waiting_times or data is None:
print('Using random waiting times.')
times = self.waiting_times = np.random.rand(size)
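# spread the random waiting times over several orders of magnitude:
# randint(-1, 3) yields exponents in {-1, 0, 1, 2}, i.e. roughly 0.1 to 100 s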
for i in range(size):
times[i] *= 10**np.random.randint(-1, 3)
else:
print('Generating waiting times from ECDF distribution')
# print('Generating waiting times from Gamma distribution')
self.waiting_times = waitingtimes.from_ecdf(data, len(self.labels))
# self.waiting_times = waitingtimes.from_sigmoid_ecdf(data, len(self.labels))
# self.waiting_times = waitingtimes.from_gamma_fit(len(self.labels))
# TODO: get waiting times from gamma distribution again
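# Hypothetical gamma-based alternative (assumed shape/scale values, not fitted):
#   self.waiting_times = np.random.gamma(shape=0.5, scale=10.0, size=size)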
def build_inlets(self, amount = 5):
inlets = np.array(self.inlets, dtype = int)  # np.int is deprecated; the built-in int works with current NumPy
if not np.any(inlets):
self.generate_inlets(amount)
else:
# double-check if inlet pores are actually in the network
temp_inlets = deque()
print('Taking inlets from command-line arguments.')
for inlet in inlets:
if self.nodes[inlet] in self.graph:
temp_inlets.append(inlet)
self.inlets = np.array(temp_inlets)
# maybe TODO: Change this to start with one random inlet and some amount of distant neighbours
def generate_inlets(self, amount):
print('Generating {} inlets'.format(amount))
prng = np.random.RandomState(self.seed)
self.inlets = prng.choice(list(self.labels), size=amount, replace=False)
def neighbour_labels(self, node):
neighbour_nodes = self.graph.neighbors(self.nodes[node])
neighbours = deque()
for neighbour in neighbour_nodes:
neighbours.append(self.label_dict[neighbour])
return neighbours
def outlet_resistances(self):
"""
Find a path through the filled network to calculate the inlet
resistance imposed on the pores at the waterfront.
Quick and dirty: this part makes the code slow and might even be
wrong; it still has to be checked.
"""
# initialize pore resistances
self.R0 = np.zeros(len(self.filled))
# only filled pores contribute to the network permeability
filled_inlets = deque()
for inlet in self.inlets:
if self.filled[inlet]:
filled_inlets.append(inlet)
if self.verbose:
print('\nfilled inlets', filled_inlets)
return self.outlet_resistances_r(filled_inlets)
# this function recursively calculates the effective inlet resistance
# for every pore with the same distance (layer) to the network inlet
def outlet_resistances_r(self, layer, visited = {}):
if len(layer) == 0:
return self.R0
if self.verbose:
print('current layer', layer)
next_layer = deque()
for node in layer:
inv_R_eff = np.float64(0)
neighbours = self.neighbour_labels(node)
if self.verbose:
print('visiting node', node)
for neighbour in neighbours:
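# parallel flow paths through filled neighbours: inverse resistances add,
# 1/R_eff = sum_k 1/R_k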
if neighbour in layer:
inv_R_eff += 1/np.float64(self.R0[neighbour] + self.R_full[neighbour])