prompt | completion | api
---|---|---
stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59
#!/usr/bin/env python3 -u
"""
Main Script for training and testing
"""
import argparse
import json
import logging
import os
import pdb
import random
import sys
import time as t
from collections import OrderedDict
import numpy as np
import spacy
import torch
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader, RandomSampler, SubsetRandomSampler
from data_loader import ContDataset, MySampler
from dataset_settings import data_dir
# from extra import plot_batch
from model import ContModel
from settings import (args, batch_size, device,
individual_turnpair_experiments, just_test,
just_test_folder, just_test_model, language_size,
load_encodings, load_model, load_second_encodings,
lstm_sets_dict, max_epochs, naming_dict, note_append,
num_data_loader_workers)
from settings import num_feat_per_person as num_feat_per_person_dict
from settings import (optim_patience, pred_task_dict,
test_dataset_settings_dict, test_file_list,
time_out_length, train_dataset_settings_dict,
train_file_list, use_ling, vae_data_multiplier,
vae_data_multiplier_2, vae_experiments, vae_target_da,
valid_dataset_settings_dict, valid_file_list)
from test_funcs import test
from util import (get_individual_turnpair_dataset, get_vae_dataset,
get_vae_encodings)
# from test_funcs import sanity_check_func, get_batch_items_for_full_test
sys.dont_write_bytecode = True
torch.autograd.set_detect_anomaly(True)
torch.set_default_dtype(torch.float32)
SET_TRAIN = 0
SET_VALID = 1
SET_TEST = 2
def main():
SEED = args.seed
random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
t0 = t.time()
embeds_usr, embeds_sys, embeds, num_feat_per_person, sil_tok, nlp = setup_ling()
if just_test:
just_test_func(
embeds_usr=embeds_usr,
embeds_sys=embeds_sys,
embeds=embeds,
num_feat_per_person=num_feat_per_person,
sil_tok=sil_tok,
nlp=nlp
)
    def _init_fn(worker_id):
        # DataLoader calls worker_init_fn(worker_id); seed each worker for reproducibility
        np.random.seed(SEED)
print('Loading valid DATA')
valid_dataset = ContDataset(valid_dataset_settings_dict)
valid_dataset.embeds_usr = embeds_usr
valid_dataset.embeds_sys = embeds_sys
valid_dataset.nlp = nlp
valid_dataset.sil_tok = sil_tok
collate_fn_valid = valid_dataset.collate_fn
valid_sampler = MySampler(valid_dataset)
valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler,
batch_size=batch_size, collate_fn=collate_fn_valid,
num_workers=num_data_loader_workers,
worker_init_fn=_init_fn)
num_valid_batches = len(valid_dataloader)
valid_dataset.update_annots_test = test_dataset_settings_dict['update_annots_test']
print('Loading train DATA')
train_dataset = ContDataset(train_dataset_settings_dict)
train_dataset.embeds_usr = embeds_usr
train_dataset.embeds_sys = embeds_sys
train_dataset.nlp = nlp
train_dataset.sil_tok = sil_tok
collate_fn_train = train_dataset.collate_fn
if lstm_sets_dict['two_sys_turn']:
tmp_sampler = MySampler(train_dataset)
train_sampler = SubsetRandomSampler(tmp_sampler.my_indices_no_first)
else:
train_sampler = RandomSampler(
train_dataset) if lstm_sets_dict['train_random_sample'] else MySampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=batch_size, collate_fn=collate_fn_train,
num_workers=num_data_loader_workers, worker_init_fn=_init_fn)
num_train_batches = len(train_dataloader)
print('Done loading all DATA')
    print('time taken: {:.2f} s'.format(t.time() - t0))
context_vec_settings_dict = {
'train': len(train_file_list),
'valid': len(valid_file_list),
'test': len(test_file_list)
}
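    # One entry per data split giving the number of files; presumably used by
    # ContModel to size its per-file context/hidden state (see reset_hidden below).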
lstm_sets_dict['sil_tok'] = sil_tok
model = ContModel(num_feat_per_person, lstm_sets_dict,
device, context_vec_settings_dict,
embeds_usr, embeds_sys, embeds)
# print([[k, i.shape, torch.prod(torch.tensor(i.shape))] for k, i in model.state_dict().items()]) # keep for debugging
# best_valid_loss = 10000
iteration = 0
results_dict = OrderedDict()
results_dict['train'], results_dict['valid'], results_dict['test'] = OrderedDict(
), OrderedDict(), OrderedDict()
for task in ['all'] + pred_task_dict['active_outputs'] + ['iteration', 'epoch', ]:
results_dict['train'][task] = []
results_dict['valid'][task] = []
results_dict['test'][task] = []
# results_dict['test']['num_batches'] = len(test_dataloader)
results_dict['valid']['stats'] = []
results_dict['test']['stats'] = []
if load_model:
print('LOADING MODEL FROM DISK')
if torch.cuda.is_available():
checkpoint = torch.load(just_test_folder + '/model.pt')
else:
checkpoint = torch.load(
just_test_folder+'/model.pt', map_location='cpu')
model = torch.nn.DataParallel(model, dim=0)
model.load_state_dict(checkpoint)
model.to(device)
embeds = model.module.embeds
train_dataset.embeds = embeds
valid_dataset.embeds = embeds
train_dataset.nlp = nlp
valid_dataset.nlp = nlp
valid_dataset.sil_tok = sil_tok
results_dict = json.load(open(just_test_folder + '/results.json', 'r'))
# model.load_state_dict(checkpoint, strict=False)
iteration = results_dict['train']['iteration'][-1]
if not note_append == '_dev' and not os.path.exists(just_test_folder+'/optimizer.pt'):
initial_learning_rate = float(input("Set initial learning rate:"))
lstm_sets_dict['learning_rate'] = initial_learning_rate
else:
model = torch.nn.DataParallel(model, dim=0)
model.to(device)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
print('Parameter count:{}'.format(int(count_parameters(model))))
optimizer = torch.optim.Adam(model.parameters(
), lr=lstm_sets_dict['learning_rate'], weight_decay=lstm_sets_dict['l2'])
if load_model and os.path.exists(just_test_folder + '/optimizer.pt'):
optim_state = torch.load(just_test_folder+'/optimizer.pt')
optimizer.load_state_dict(optim_state)
print('optimizer loaded. LR:{}'.format(optimizer.defaults['lr']))
# scheduler = ReduceLROnPlateau(optimizer, 'min', patience=optim_patience, min_lr=5.0e-06, verbose=True)
# 9000, 2000, 2000, 1000, 1000 iterations.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=lstm_sets_dict['milestones'], gamma=0.1)
if load_model and os.path.exists(just_test_folder + '/scheduler.pt'):
sched_state = torch.load(just_test_folder + '/scheduler.pt')
scheduler.load_state_dict(sched_state)
print('scheduler loaded.')
print('LR {}'.format(get_lr(optimizer)))
# test_dataset.embeds = model.module.embeds
train_dataset.embeds = model.module.embeds
valid_dataset.embeds = model.module.embeds
# Train
for epoch in range(max_epochs):
model.train()
loss_dict_train_raw = {
task: 0.0 for task in pred_task_dict['active_outputs']}
loss_dict_train_raw['all'] = 0.0
num_pred_samples_for_result = {
task: 0 for task in pred_task_dict['active_outputs']}
model.module.reset_hidden('train')
# hidden_inference = model.module.hidden_inference['train']
model.zero_grad()
start_time = t.time()
for batch_ndx, batch in enumerate(train_dataloader):
if not (lstm_sets_dict['two_sys_turn']) and (len(batch['update_strt_f']) != batch_size):
# This should just be triggered for the last few batches of the epoch
continue
if lstm_sets_dict['two_sys_turn'] and batch['sys_trn_1'].shape[0] != int(batch_size * 2):
print('caught small batch')
continue
cont_file_indx, cont_ab_indx = batch['file_idx'], batch['a_idx']
if lstm_sets_dict['two_sys_turn']:
cont_file_indx = cont_file_indx[::2]
cont_ab_indx = cont_ab_indx[::2]
# h_inf = hidden_inference[:, cont_file_indx, cont_ab_indx, 0, :]
# c_inf = hidden_inference[:, cont_file_indx, cont_ab_indx, 1, :]
mod_in = {k: v for k, v in batch.items() if not (k in ['y_dict'])}
# mod_in['h_inf'] = h_inf.squeeze(0)
# mod_in['c_inf'] = c_inf.squeeze(0)
mod_in = {**batch['y_dict'], **mod_in}
# loc_seed = torch.LongTensor([random.randint(0, 1<<31)]*2).unsqueeze(1)
# mod_in['seed'] = loc_seed
bp_loss, outputs = model(**mod_in)
loss = torch.sum(bp_loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# if lstm_sets_dict['plot_batch']:
# for b_i in [0, 1, 2, 3]:
# plot_batch(outputs, mod_in, train_dataset, b_i)
# quit()
# Don't delete, need for context
# hidden_inference[:, cont_file_indx, cont_ab_indx, 0, :] = outputs['h_inf'].detach().cpu()
# hidden_inference[:, cont_file_indx, cont_ab_indx, 1, :] = outputs['c_inf'].detach().cpu()
# aggregate info
loss_dict_train_raw = {k: float(loss_dict_train_raw[k]) + float(np.sum(
v.data.cpu().numpy())) for k, v in outputs['loss_dict_train_raw'].items()}
num_pred_samples_for_result = {k: num_pred_samples_for_result[k] + int(np.sum(
v.data.cpu().numpy())) for k, v in outputs['num_pred_samples_for_result'].items()}
if (iteration + 1) % 10 == 0 or ((note_append == '_dev' or note_append == '_dev_restart_') and (iteration + 1) % 2 == 0):
print_results = {}
print_results['all'] = 0.0
weight_denom = 0.0
for task in pred_task_dict['active_outputs']:
print_results[task] = loss_dict_train_raw[task] / \
num_pred_samples_for_result[task]
print_results['all'] += pred_task_dict[task]['weight'] * \
print_results[task]
weight_denom += pred_task_dict[task]['weight']
num_pred_samples_for_result[task] = 0
loss_dict_train_raw[task] = 0.0
print_results['all'] = print_results['all']/weight_denom
elapsed = t.time() - start_time
loss_string = ''
loss_string += ' train | epoch {:2d} {:4d}/{:4d}| dur(s) {:4.2f} |'
loss_string += ''.join(
[task + ' {:1.5f} |' for task in pred_task_dict['active_outputs']])
loss_string += ' Weighted {:1.5f} '
loss_string_items = [epoch, batch_ndx+1, num_train_batches, elapsed] + [
print_results[task] for task in pred_task_dict['active_outputs']] + [print_results['all']]
print(loss_string.format(*loss_string_items))
for task in pred_task_dict['active_outputs']:
results_dict['train'][task].append(
float(print_results[task]))
results_dict['train']['all'].append(
float(print_results['all']))
results_dict['train']['iteration'].append(int(iteration) + 1)
results_dict['train']['epoch'].append(int(epoch))
start_time = t.time()
if (iteration + 1) % 200 == 0 or ((note_append == '_dev' or note_append == '_dev_restart_') and (iteration + 1) % 2 == 0): # 25
full_test_flag = lstm_sets_dict['valid_full_test_flag']
model.module.autoregress = lstm_sets_dict['valid_autoregress']
valid_loss_all, valid_loss_TL = test(
model, valid_dataloader, full_test_flag, results_dict, iteration, epoch)
if note_append != '_dev' and (np.argmin(valid_loss_TL) == (len(valid_loss_TL)-1)):
torch.save(model.state_dict(),
naming_dict['fold_name']+'/best_model.pt')
torch.save(model.state_dict(),
naming_dict['fold_name'] + '/model.pt')
torch.save(optimizer.state_dict(),
naming_dict['fold_name']+'/optimizer.pt')
json.dump(results_dict, open(
naming_dict['fold_name'] + '/results.json', 'w'), indent=4)
# scheduler.step(valid_loss_all[-1])
scheduler.step()
torch.save(scheduler.state_dict(),
naming_dict['fold_name']+'/scheduler.pt')
print(naming_dict['fold_name'])
print('LR {}'.format(get_lr(optimizer)))
print('Best TL valid loss: {:.4f} ({} steps ago) \n'.format(
np.min(valid_loss_TL), len(valid_loss_TL) - np.argmin(valid_loss_TL)))
# Run tests after final iteration
if scheduler._step_count >= scheduler.milestones[-1]:
# load test dataloader
# del train_dataset.dataset # free some RAM
# test_dataset = ContDataset(test_dataset_settings_dict)
# collate_fn_test = test_dataset.collate_fn
# test_dataset.embeds_usr = embeds_usr
# test_dataset.embeds_sys = embeds_sys
# test_dataset.embeds = embeds
# test_dataset.nlp = nlp
# test_dataset.sil_tok = sil_tok
# test_sampler = MySampler(test_dataset)
# test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
# batch_size=batch_size, collate_fn=collate_fn_test,
# num_workers=num_data_loader_workers,
# worker_init_fn=_init_fn)
# test_dataset.update_annots_test = test_dataset_settings_dict['update_annots_test']
# test_dataloader.dataset.time_out_length = time_out_length
# epoch = 0
# train_batch_indx = -1
# full_test_flag = False
# test(model, test_dataloader, full_test_flag,
# results_dict, train_batch_indx, epoch)
# json.dump(results_dict, open(
# naming_dict['fold_name'] + '/results_test.json', 'w'), indent=4)
# print('Finished non-sampling test')
# full_test_flag = True
# model.module.lstm_sets_dict['full_test_flag'] = True
# test(model, test_dataloader, full_test_flag,
# results_dict, train_batch_indx, epoch)
# json.dump(results_dict, open(
# naming_dict['fold_name'] + '/results_sampled.json', 'w'), indent=4)
# print('Finished sampling test')
print('DONE')
os._exit(0)
model.train()
model.module.autoregress = lstm_sets_dict['train_autoregress']
start_time = t.time()
iteration += 1
start_time = t.time()
print('finished')
def setup_ling():
if use_ling:
nlp = spacy.blank('en')
if language_size == 500:
print('using REALLY small language: 500')
nlp.from_disk(data_dir+'/spacy_tok_combined_500/')
elif language_size == 5000:
print('using small language: 5000')
nlp.from_disk(data_dir+'/spacy_tok_combined_5000/')
elif language_size == 10000:
print('using small language: 10000')
nlp.from_disk(data_dir+'/spacy_tok_combined_10000/')
else:
            print('using medium language: 20000')
nlp.from_disk(data_dir+'/spacy_tok_combined_20000/')
spacy.vocab.link_vectors_to_models(nlp.vocab)
unspec_tok = len(nlp.vocab.vectors.data)
sil_tok = unspec_tok + 1
if lstm_sets_dict['use_wait_stop_tok']:
lstm_sets_dict['unspec_tok'] = unspec_tok # for user
lstm_sets_dict['sil_tok'] = sil_tok
lstm_sets_dict['wait_tok'] = sil_tok + 1
lstm_sets_dict['stop_tok'] = sil_tok + 2
lstm_sets_dict['pad_tok'] = sil_tok + 3
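            # Vocabulary index layout beyond the GloVe rows (V = len(nlp.vocab.vectors.data)):
            # unspec_tok = V, sil_tok = V+1, wait_tok = V+2, stop_tok = V+3, pad_tok = V+4,
            # which is why five extra embedding rows are appended below.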
num_embed_rows_to_add = 5
# padding_idx = lstm_sets_dict['stop_tok']
padding_idx = lstm_sets_dict['pad_tok']
else:
num_embed_rows_to_add = 1
# padding_idx = sil_tok
padding_idx = None
lstm_sets_dict['sil_tok'] = sil_tok
embedding_dim = nlp.vocab.vectors.data.shape[1]
num_embeddings = len(nlp.vocab.vectors.data)
if lstm_sets_dict['ling_use_glove']:
embeds = nn.Embedding.from_pretrained(
torch.FloatTensor(np.concatenate([np.array(nlp.vocab.vectors.data), np.zeros(
[num_embed_rows_to_add, embedding_dim])])),
padding_idx=padding_idx, freeze=lstm_sets_dict['ling_emb_freeze']
)
else:
num_embeddings = len(nlp.vocab.vectors.data)
embeds = nn.Embedding(
num_embeddings + 1, embedding_dim=embedding_dim, padding_idx=sil_tok).to(device)
embeds_reduce_layer_usr = nn.Linear(embedding_dim, 300)
embeds_reduce_layer_sys = nn.Linear(embedding_dim, 300)
embeds_dropout_usr = nn.Dropout(lstm_sets_dict['embeds_dropout'])
embeds_dropout_sys = nn.Dropout(lstm_sets_dict['embeds_dropout'])
embeds_usr = nn.Sequential(embeds_dropout_usr, embeds_reduce_layer_usr)
embeds_sys = nn.Sequential(embeds_dropout_sys, embeds_reduce_layer_sys)
num_feat_per_person = num_feat_per_person_dict['acous'] + embedding_dim
print('Embeddings loaded.')
else:
num_feat_per_person = num_feat_per_person_dict['acous']
        embeds_usr, embeds_sys, embeds = 0, 0, 0  # placeholders; embeds would otherwise be undefined in the return below
sil_tok = -1
nlp = -1
return embeds_usr, embeds_sys, embeds, num_feat_per_person, sil_tok, nlp
def just_test_func(**kwargs):
print('******* JUST TESTING *****')
print('Loading test DATA')
context_vec_settings_dict = {
'train': len(train_file_list),
'valid': len(valid_file_list),
'test': len(test_file_list)
}
# if kwargs['load_test_model']:
if torch.cuda.is_available():
checkpoint = torch.load(just_test_folder+just_test_model)
else:
checkpoint = torch.load(
just_test_folder+just_test_model, map_location='cpu')
model = ContModel(kwargs['num_feat_per_person'], lstm_sets_dict, device,
context_vec_settings_dict, kwargs['embeds_usr'], kwargs['embeds_sys'], kwargs['embeds'])
model.temperature = lstm_sets_dict['temperature']
model.autoregress = lstm_sets_dict['test_autoregress']
# only test on one gpu
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, device_ids=[0], dim=0)
else:
model = torch.nn.DataParallel(model, dim=0)
strict = True
model.load_state_dict(checkpoint, strict=strict)
model.to(device)
model.eval()
embeds = model.module.embeds
test_dataset = ContDataset(test_dataset_settings_dict)
collate_fn_test = test_dataset.collate_fn
test_dataset.embeds_usr = kwargs['embeds_usr']
test_dataset.embeds_sys = kwargs['embeds_sys']
test_dataset.embeds = embeds
test_dataset.nlp = kwargs['nlp']
test_dataset.sil_tok = kwargs['sil_tok']
    def _init_fn(worker_id):
        # DataLoader calls worker_init_fn(worker_id); seed each worker for reproducibility
        np.random.seed(lstm_sets_dict['seed'])
if vae_experiments:
test_dataset_subset = get_vae_dataset(
test_dataset, test_dataset_settings_dict['update_annots_test'], vae_target_da)
test_dataset_subset = test_dataset_subset * vae_data_multiplier
if load_encodings and load_second_encodings:
test_dataset_subset_2 = get_vae_dataset(
test_dataset, test_dataset_settings_dict['update_annots_test'], vae_target_da)
            test_dataset_subset_2 = test_dataset_subset_2 * vae_data_multiplier_2
test_dataset_subset = test_dataset_subset[:len(
test_dataset_subset)//2] + test_dataset_subset_2[:len(test_dataset_subset_2)//2]
print('Target da: {}\t number of points: {}'.format(
vae_target_da, len(test_dataset_subset)))
test_sampler = SubsetRandomSampler(test_dataset_subset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
batch_size=batch_size, collate_fn=collate_fn_test,
num_workers=num_data_loader_workers, drop_last=False,
worker_init_fn=_init_fn)
if load_encodings:
mu, log_var = get_vae_encodings(lstm_sets_dict, False)
if load_second_encodings:
mu_2, log_var_2 = get_vae_encodings(lstm_sets_dict, True)
mu = (mu + mu_2) / 2
            log_var = np.log(0.5 * (np.exp(log_var) + np.exp(log_var_2)))
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Activation, Flatten, Dropout, Dense, GlobalAveragePooling2D
from keras.models import Sequential, load_model
from keras.optimizers import Adam
import tensorflow as tf
import keras
import numpy as np
import time
import random
from collections import deque
"""Code taken/implemented from https://pythonprogramming.net/reinforcement-learning-self-driving-autonomous-cars-carla-python/"""
from keras.callbacks import TensorBoard
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
    # Overridden, saves logs with our step number
    # (otherwise every .fit() call would start writing from the 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
    # Overridden
    # We train for one batch only, so there is no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
    # Overridden, so the writer won't be closed
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
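# A minimal usage sketch (illustrative, not part of the original file): the agent
# below keeps one ModifiedTensorBoard across all .fit() calls and advances `step`
# manually, e.g. once per episode, so logs accumulate in a single file:
#
#   tb = ModifiedTensorBoard(log_dir=f"logs/demo-{int(time.time())}")
#   for episode in range(1, 11):
#       tb.step = episode
#       tb.update_stats(reward_avg=0.0, epsilon=1.0)  # hypothetical metric names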
class DQNAgent:
def __init__(self):
self.model = load_model('models/model450.model')
#self.model = self.create_model()
#self.target_model = self.create_model()
self.target_model = load_model('models/model450.model')
self.replay_memory = deque(maxlen=1000000)
self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{'carla'}-{int(time.time())}")
self.target_update_counter = 0
self.graph = tf.get_default_graph()
self.terminate = False
self.last_logged_episode = 0
self.training_initialized = False
def create_model(self):
model = Sequential()
model.add(Dense(64, input_shape=(600,400,3), activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Flatten())
model.add(Dense(3))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
def add_memory(self, transition):
# transition = (current_state, action, reward, new_state, done)
self.replay_memory.append(transition)
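    # Illustrative only: a stored transition might look like
    #   agent.add_memory((current_state, 1, -0.5, new_state, False))
    # with image observations for the states and an integer action index in [0, 3).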
def random_action(self):
action = np.random.randint(0, 3)
return action
def train(self):
        # Only start training once the replay memory holds enough transitions;
        # consider raising this threshold (e.g. to 1000) if training goes badly.
if len(self.replay_memory) < 64:
return
minibatch = random.sample(self.replay_memory, 16)
        current_states = np.array([transition[0] for transition in minibatch])
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib import rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
from scipy.interpolate import UnivariateSpline
rcParams.update({'font.size': 22})
def colorbar(mappable,ax):
pos = ax.get_subplotspec().get_position(fig)
cb_width = 0.02
cb_height = pos.y1-pos.y0
vertical_position = pos.y0
horizontal_position = 0.905
cax = fig.add_axes([horizontal_position, vertical_position, cb_width, cb_height])
    clb = fig.colorbar(mappable, cax=cax, orientation='vertical')
return clb
os.chdir('..')
home = os.getcwd() + "/"
plot_dir = home + "Figs/"
data_dir = home + "data/"
ax_loc = np.array([[0.125,0.885],[0.55,0.885],[0.125,0.455],[0.55,0.455]])
aspect = 16./9.
width = 10
lw = 4
fs = 'x-large'
clabel = ["", r"$\Delta \epsilon$", r"$\Delta T_s$", r"$\Delta f_{ice}$"]
sublabel = ['a','b','c','d']
fig = plt.figure(1,figsize=(aspect*width,2*width),dpi=300)
ax1 = fig.add_subplot(611)
ax2 = fig.add_subplot(612)
ax3 = fig.add_subplot(613)
ax4 = fig.add_subplot(614)
ax5 = fig.add_subplot(615)
ax6 = fig.add_subplot(616)
ax_list = [ax1,ax2,ax3,ax4,ax5,ax6]
sub_lbl = ['a','b','c','d','e','f']
data = np.genfromtxt(data_dir+"VPlanet_data_GenBin_map_10.txt",delimiter=',',comments='#')
data[data[:,3]>90,2] = np.inf
data[data[:,6]==3,7] = 3
for i in range(0,6):
ax = ax_list[i]
if i == 0:
Q = data[:,2]
cmap = "gist_rainbow"
vmin, vmax = 0,90
elif i == 1:
cmap = "magma"
data[data[:,3]==0,3]=0.01
Q = np.log10(data[:,3])
vmin, vmax = 0, np.log10(60)
elif i == 2:
data[data[:,4]==0,4]=0.01
Q = np.log10(data[:,4])
cmap = "plasma"
vmin, vmax = -2,0
elif i == 3:
Q = data[:,-4]
cmap = colors.ListedColormap(['k', 'gray', 'b', 'r'])
vmin, vmax = -0.5,3
elif i == 4:
Q = data[:,-2]
cmap = cm.get_cmap("plasma").copy()
vmin, vmax = 0.001,0.4
else:
Q = data[:,-1]
cmap = cm.get_cmap("plasma").copy()
vmin, vmax = 0.001,0.4
#Q_cut = np.where(~np.isfinite(data[:,2]))[0]
#Q[Q_cut] = -1.
abin = data[:,0]
ebin = data[:,1]
xi = np.arange(10,91,1)
yi = np.arange(0,.91,0.01)
zi = griddata((abin,ebin),Q,(xi[None,:],yi[:,None]),method = 'nearest')
my_cmap = cm.get_cmap(cmap).copy()
norm = colors.Normalize(vmin,vmax)
cmmapable = cm.ScalarMappable(norm,my_cmap)
cmmapable.set_array(range(0,1))
my_cmap.set_over('w')
if i > 3:
my_cmap.set_under('k')
my_cmap.set_over('r')
if i == 3:
bounds = [0.,1.,2.,3.,4.]
norm = colors.BoundaryNorm(bounds, cmap.N)
CS = ax.pcolormesh(xi,yi+0.005,zi,cmap = my_cmap,norm=norm,shading='auto',zorder=2)
switch_states = np.zeros(len(abin))
for k in range(0,len(abin)):
if data[k,-3] != data[k,-4]:
switch_states[k] = 1
#ax.plot(data[switch_states==1,0],data[switch_states==1,1]+0.005,'wx',ms=4,zorder=4,alpha=0.75)
switch_rows = np.where(switch_states==1)[0]
ax.plot(data[switch_rows,0],data[switch_rows,1],'m+',ms=6,zorder=4)
else:
CS = ax.pcolormesh(xi,yi+0.005,zi,cmap = my_cmap,vmin=vmin,vmax=vmax,shading='auto')
#ax.plot(23,46,color='w',marker='$\oplus$',ms=25)
#ax.set_xticks(np.arange(10,100,10))
ax.set_yticks(np.arange(0,1.,0.2))
ax.set_ylim(0,0.9)
ax.set_xlim(10,90)
ax.tick_params(axis='both', direction='out',length = 8.0, width = 8.0,labelsize=fs)
ax.text(0.02,0.86,sub_lbl[i], color='k',fontsize='x-large',weight='bold',horizontalalignment='left',transform=ax.transAxes)
ax.set_ylabel("$e_{\\rm bin}$",fontsize=fs)
if i == 3:
ax.text(1.02,0.8,"ice free", color='k',fontsize='medium',weight='bold',horizontalalignment='left',transform=ax.transAxes)
ax.text(1.02,0.6,"ice caps", color='gray',fontsize='medium',weight='bold',horizontalalignment='left',transform=ax.transAxes)
ax.text(1.02,0.4,"ice belt", color='b',fontsize='medium',weight='bold',horizontalalignment='left',transform=ax.transAxes)
ax.text(1.02,0.2,"snowball", color='r',fontsize='medium',weight='bold',horizontalalignment='left',transform=ax.transAxes)
if i < 5:
ax.set_xticklabels([])
else:
ax.set_xlabel("$a_{\\rm bin}$ (au)",fontsize=fs)
if i == 0:
color_label=r'$\Delta \epsilon$'
cbar=colorbar(CS,ax)
cbar.set_label(color_label,fontsize=fs)
        cticks = np.arange(0,105,15)
"""Matrix algebra for counting triplet motifs"""
import numpy as np
import pandas as pd
from itertools import permutations, combinations_with_replacement
from util import index_to_directed_triplet_motif_matrix, basis_vector, bin_subsets, binary_digits
def tupp(m):
return tuple(m.ravel())
def matt(m):
return np.array(m).reshape(3, 3)
def triplet_pattern(t):
"""Convert an arbitrary 3-tuple into a string specifying the distinct elements."""
if t[0] == t[1] == t[2]:
return "AAA"
if t[0] == t[1]:
return "AAB"
if t[1] == t[2]:
return "ABB"
return "ABC"
def tuple_swap_02(t):
return (t[2], t[1], t[0])
################################################################################
# Isomorphisms of 3 node graphs
################################################################################
permutation_matrices = {(s0, s1, s2): np.array([basis_vector(s0), basis_vector(s1), basis_vector(s2)]) for s0, s1, s2 in permutations(range(3))}
def get_permutation_matrices():
"""
    There are some aspects of Python's import system that I cannot seem to
    figure out, so there are going to be a lot of these `get_blah()` functions
    in this file.
"""
return permutation_matrices
def isomorphic(tg, th):
"""Check if graphs, represented by raveled tuples tg, th, are isomorphic"""
mg = matt(tg)
mh = matt(th)
for P in permutation_matrices.values():
if np.all(P @ mg @ P.T == mh):
return True
return False
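# A small worked example (assumed, for illustration): the digraphs with single
# edges 0->1 and 1->2 are related by a relabeling, so this should hold:
#   g = np.zeros((3, 3), dtype=int); g[0, 1] = 1
#   h = np.zeros((3, 3), dtype=int); h[1, 2] = 1
#   assert isomorphic(tupp(g), tupp(h))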
def color_isomorphic(tg, th, cg, ch):
mg = matt(tg)
mh = matt(th)
for s, P in permutation_matrices.items():
colors_match = all(cg[si] == ch[i] for si, i in zip(s, range(3)))
if colors_match and np.all(P @ mg @ P.T == mh):
return True
return False
################################################################################
# Mapping from graphs to their isomorphism class
################################################################################
iso_representative_AAA = {} # map from tuple to representative tuple
iso_classes_AAA = {} # preimage map from representative tuple to list of tuples
for k in range(64):
m = index_to_directed_triplet_motif_matrix(k)
tm = tupp(m) # current tuple
for iso in iso_classes_AAA: # iso is the representative tuple
if isomorphic(tm, iso):
iso_classes_AAA[iso].append(tm)
iso_representative_AAA[tm] = iso
break
else:
iso_classes_AAA[tm] = [tm]
iso_representative_AAA[tm] = tm
iso_classes_AAB = {} # preimage map from representative to list of tuples. Color is always AAB pattern.
iso_representative_AAB = {} # map from tuple to representative tuple. Color is always AAB pattern.
colors_AAB = [0, 0, 1]
for k in range(64):
# base graph has colors AAB
m = index_to_directed_triplet_motif_matrix(k)
tm = tupp(m)
for iso in iso_classes_AAB:
if color_isomorphic(tm, iso, colors_AAB, colors_AAB):
iso_classes_AAB[iso].append(tm)
iso_representative_AAB[tm] = iso
break
else:
iso_classes_AAB[tm] = [tm]
iso_representative_AAB[tm] = tm
iso_classes_ABB = {} # preimage map from representative to list of tuples. Color is always ABB
iso_representative_ABB = {} # map from tuple to representative tuple
colors_ABB = [0, 1, 1]
for k in range(64):
# base graph has colors ABB
m = index_to_directed_triplet_motif_matrix(k)
tm = tupp(m)
for iso in iso_classes_ABB:
if color_isomorphic(tm, iso, colors_ABB, colors_ABB):
iso_classes_ABB[iso].append(tm)
iso_representative_ABB[tm] = iso
break
else:
iso_classes_ABB[tm] = [tm]
iso_representative_ABB[tm] = tm
iso_classes_ABC, iso_representative_ABC = {}, {}
for k in range(64):
t = tupp(index_to_directed_triplet_motif_matrix(k))
iso_classes_ABC[t] = [t] # preimage map from representative to list of tuples. Always singleton
iso_representative_ABC[t] = t # map from tuple to representative tuple (just identity map)
def get_iso_classes(pat):
if pat[0] == pat[1] == pat[2]:
return iso_classes_AAA
if pat[0] == pat[1]:
return iso_classes_AAB
if pat[1] == pat[2]:
return iso_classes_ABB
if pat[0] != pat[1] != pat[2]:
return iso_classes_ABC
return {}
def get_iso_representative(pat):
if pat[0] == pat[1] == pat[2]:
return iso_representative_AAA
if pat[0] == pat[1]:
return iso_representative_AAB
if pat[1] == pat[2]:
return iso_representative_ABB
if pat[0] != pat[1] != pat[2]:
return iso_representative_ABC
return {}
def get_iso_list(pat):
if pat[0] == pat[1] == pat[2]:
return iso_list_AAA
if pat[0] == pat[1]:
return iso_list_AAB
if pat[1] == pat[2]:
return iso_list_ABB
if pat[0] != pat[1] != pat[2]:
return iso_list_ABC
return []
def class_representative(ct, gt):
"""
    Get the graph tuple of the representative of the class in which `gt` lives,
    given that the nodes are colored according to the pattern in `ct`.
Note that this skips pattern 'ABA' since all my current algorithms
will avoid using that pattern.
"""
if ct[0] == ct[1] == ct[2]: # Case AAA
return iso_representative_AAA[gt]
if ct[0] == ct[1] != ct[2]: # Case AAB
return iso_representative_AAB[gt]
if ct[0] != ct[1] == ct[2]: # Case ABB
return iso_representative_ABB[gt]
if ct[0] != ct[1] != ct[2]: # Case ABC
return iso_representative_ABC[gt]
return None
################################################################################
# Mobius Inversion-type matrices
################################################################################
iso_list_AAA = list(iso_classes_AAA.keys())
exact_to_over_AAA = np.zeros((len(iso_list_AAA), len(iso_list_AAA)), dtype=int)
for j, tm_j in enumerate(iso_list_AAA):
for tm_i in bin_subsets(tm_j):
i = iso_list_AAA.index(iso_representative_AAA[tm_i])
exact_to_over_AAA[i, j] += 1
over_to_exact_AAA = np.linalg.inv(exact_to_over_AAA)
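# Sanity relation (a sketch of the intended Mobius-style inversion): if
# n_exact[j] counts induced occurrences of class j, the subset sums above give
# overcounts n_over = exact_to_over_AAA @ n_exact, and the inverse recovers
#   n_exact = over_to_exact_AAA @ n_over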
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the photometry module.
"""
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_less)
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import NDData, StdDevUncertainty
from astropy.table import Table
import astropy.units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.wcs import WCS
from ..photometry import aperture_photometry
from ..circle import (CircularAperture, CircularAnnulus, SkyCircularAperture,
SkyCircularAnnulus)
from ..ellipse import (EllipticalAperture, EllipticalAnnulus,
SkyEllipticalAperture, SkyEllipticalAnnulus)
from ..rectangle import (RectangularAperture, RectangularAnnulus,
SkyRectangularAperture, SkyRectangularAnnulus)
from ...datasets import get_path, make_4gaussians_image, make_wcs, make_gwcs
from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB # noqa
APERTURE_CL = [CircularAperture,
CircularAnnulus,
EllipticalAperture,
EllipticalAnnulus,
RectangularAperture,
RectangularAnnulus]
TEST_APERTURES = list(zip(APERTURE_CL, ((3.,),
(3., 5.),
(3., 5., 1.),
(3., 5., 4., 12./5., 1.),
(5, 8, np.pi / 4),
(8, 12, 8, 16./3., np.pi / 8))))
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_outside_array(aperture_class, params):
data = np.ones((10, 10), dtype=float)
aperture = aperture_class((-60, 60), *params)
fluxtable = aperture_photometry(data, aperture)
# aperture is fully outside array:
assert np.isnan(fluxtable['aperture_sum'])
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_inside_array_simple(aperture_class, params):
data = np.ones((40, 40), dtype=float)
aperture = aperture_class((20., 20.), *params)
table1 = aperture_photometry(data, aperture, method='center',
subpixels=10)
table2 = aperture_photometry(data, aperture, method='subpixel',
subpixels=10)
table3 = aperture_photometry(data, aperture, method='exact', subpixels=10)
true_flux = aperture.area
assert table1['aperture_sum'] < table3['aperture_sum']
if not isinstance(aperture, (RectangularAperture, RectangularAnnulus)):
assert_allclose(table3['aperture_sum'], true_flux)
assert_allclose(table2['aperture_sum'], table3['aperture_sum'],
atol=0.1)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
@pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES)
def test_aperture_plots(aperture_class, params):
# This test should run without any errors, and there is no return
# value.
# TODO: check the content of the plot
aperture = aperture_class((20., 20.), *params)
aperture.plot()
def test_aperture_pixel_positions():
pos1 = (10, 20)
pos2 = [(10, 20)]
r = 3
ap1 = CircularAperture(pos1, r)
ap2 = CircularAperture(pos2, r)
assert not np.array_equal(ap1.positions, ap2.positions)
class BaseTestAperturePhotometry:
def test_array_error(self):
# Array error
error = np.ones(self.data.shape, dtype=float)
if not hasattr(self, 'mask'):
mask = None
true_error = np.sqrt(self.area)
else:
mask = self.mask
# 1 masked pixel
true_error = np.sqrt(self.area - 1)
table1 = aperture_photometry(self.data,
self.aperture, method='center',
mask=mask, error=error)
table2 = aperture_photometry(self.data,
self.aperture,
method='subpixel', subpixels=12,
mask=mask, error=error)
table3 = aperture_photometry(self.data,
self.aperture, method='exact',
mask=mask, error=error)
if not isinstance(self.aperture, (RectangularAperture,
RectangularAnnulus)):
assert_allclose(table3['aperture_sum'], self.true_flux)
assert_allclose(table2['aperture_sum'], table3['aperture_sum'],
atol=0.1)
assert np.all(table1['aperture_sum'] < table3['aperture_sum'])
if not isinstance(self.aperture, (RectangularAperture,
RectangularAnnulus)):
assert_allclose(table3['aperture_sum_err'], true_error)
assert_allclose(table2['aperture_sum_err'],
table3['aperture_sum_err'], atol=0.1)
assert np.all(table1['aperture_sum_err'] < table3['aperture_sum_err'])
class TestCircular(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.true_flux = self.area
class TestCircularArray(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = ((20., 20.), (25., 25.))
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.area = np.array((self.area, ) * 2)
self.true_flux = self.area
class TestCircularAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
r_in = 8.
r_out = 10.
self.aperture = CircularAnnulus(position, r_in, r_out)
self.area = np.pi * (r_out * r_out - r_in * r_in)
self.true_flux = self.area
class TestCircularAnnulusArray(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = ((20., 20.), (25., 25.))
r_in = 8.
r_out = 10.
self.aperture = CircularAnnulus(position, r_in, r_out)
self.area = np.pi * (r_out * r_out - r_in * r_in)
self.area = np.array((self.area, ) * 2)
self.true_flux = self.area
class TestElliptical(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
a = 10.
b = 5.
theta = -np.pi / 4.
self.aperture = EllipticalAperture(position, a, b, theta=theta)
self.area = np.pi * a * b
self.true_flux = self.area
class TestEllipticalAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
a_in = 5.
a_out = 8.
b_out = 5.
theta = -np.pi / 4.
self.aperture = EllipticalAnnulus(position, a_in, a_out, b_out,
theta=theta)
self.area = (np.pi * (a_out * b_out) -
np.pi * (a_in * b_out * a_in / a_out))
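        # The inner semi-minor axis is implicitly b_in = b_out * a_in / a_out,
        # i.e. the annulus preserves the outer ellipse's axis ratio.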
self.true_flux = self.area
class TestRectangularAperture(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
h = 5.
w = 8.
theta = np.pi / 4.
self.aperture = RectangularAperture(position, w, h, theta=theta)
self.area = h * w
self.true_flux = self.area
class TestRectangularAnnulus(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
position = (20., 20.)
h_out = 8.
w_in = 8.
w_out = 12.
h_in = w_in * h_out / w_out
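        # h_in is chosen so the inner rectangle keeps the outer aspect ratio
        # (h_in / w_in == h_out / w_out).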
theta = np.pi / 8.
self.aperture = RectangularAnnulus(position, w_in, w_out, h_out,
theta=theta)
self.area = h_out * w_out - h_in * w_in
self.true_flux = self.area
class TestMaskedSkipCircular(BaseTestAperturePhotometry):
def setup_class(self):
self.data = np.ones((40, 40), dtype=float)
self.mask = np.zeros((40, 40), dtype=bool)
self.mask[20, 20] = True
position = (20., 20.)
r = 10.
self.aperture = CircularAperture(position, r)
self.area = np.pi * r * r
self.true_flux = self.area - 1
class BaseTestDifferentData:
def test_basic_circular_aperture_photometry(self):
aperture = CircularAperture(self.position, self.radius)
table = aperture_photometry(self.data, aperture,
method='exact')
assert_allclose(table['aperture_sum'].value, self.true_flux)
        assert table['aperture_sum'].unit == self.fluxunit
assert np.all(table['xcenter'].value ==
np.transpose(self.position)[0])
assert np.all(table['ycenter'].value ==
np.transpose(self.position)[1])
class TestInputNDData(BaseTestDifferentData):
def setup_class(self):
data = np.ones((40, 40), dtype=float)
self.data = NDData(data, unit=u.adu)
self.radius = 3
self.position = [(20, 20), (30, 30)]
self.true_flux = np.pi * self.radius * self.radius
self.fluxunit = u.adu
@pytest.mark.remote_data
def test_wcs_based_photometry_to_catalogue():
pathcat = get_path('spitzer_example_catalog.xml', location='remote')
pathhdu = get_path('spitzer_example_image.fits', location='remote')
hdu = fits.open(pathhdu)
data = u.Quantity(hdu[0].data, unit=hdu[0].header['BUNIT'])
wcs = WCS(hdu[0].header)
catalog = Table.read(pathcat)
pos_skycoord = SkyCoord(catalog['l'], catalog['b'], frame='galactic')
photometry_skycoord = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 4 * u.arcsec), wcs=wcs)
# Photometric unit conversion is needed to match the catalogue
factor = (1.2 * u.arcsec) ** 2 / u.pixel
converted_aperture_sum = (photometry_skycoord['aperture_sum'] *
factor).to(u.mJy / u.pixel)
fluxes_catalog = catalog['f4_5'].filled()
# There shouldn't be large outliers, but some differences is OK, as
# fluxes_catalog is based on PSF photometry, etc.
assert_allclose(fluxes_catalog, converted_aperture_sum.value, rtol=1e0)
assert(np.mean(np.fabs(((fluxes_catalog - converted_aperture_sum.value) /
fluxes_catalog))) < 0.1)
# close the file
hdu.close()
def test_wcs_based_photometry():
data = make_4gaussians_image()
wcs = make_wcs(data.shape)
# hard wired positions in make_4gaussian_image
pos_orig_pixel = u.Quantity(([160., 25., 150., 90.],
[70., 40., 25., 60.]), unit=u.pixel)
pos_skycoord = wcs.pixel_to_world(pos_orig_pixel[0], pos_orig_pixel[1])
pos_skycoord_s = pos_skycoord[2]
photometry_skycoord_circ = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 3 * u.arcsec), wcs=wcs)
photometry_skycoord_circ_2 = aperture_photometry(
data, SkyCircularAperture(pos_skycoord, 2 * u.arcsec), wcs=wcs)
photometry_skycoord_circ_s = aperture_photometry(
data, SkyCircularAperture(pos_skycoord_s, 3 * u.arcsec), wcs=wcs)
assert_allclose(photometry_skycoord_circ['aperture_sum'][2],
photometry_skycoord_circ_s['aperture_sum'])
photometry_skycoord_circ_ann = aperture_photometry(
data, SkyCircularAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec),
wcs=wcs)
photometry_skycoord_circ_ann_s = aperture_photometry(
data, SkyCircularAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec),
wcs=wcs)
assert_allclose(photometry_skycoord_circ_ann['aperture_sum'][2],
photometry_skycoord_circ_ann_s['aperture_sum'])
assert_allclose(photometry_skycoord_circ_ann['aperture_sum'],
photometry_skycoord_circ['aperture_sum'] -
photometry_skycoord_circ_2['aperture_sum'])
photometry_skycoord_ell = aperture_photometry(
data, SkyEllipticalAperture(pos_skycoord, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_2 = aperture_photometry(
data, SkyEllipticalAperture(pos_skycoord, 2 * u.arcsec,
2.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_s = aperture_photometry(
data, SkyEllipticalAperture(pos_skycoord_s, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_ann = aperture_photometry(
data, SkyEllipticalAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
photometry_skycoord_ell_ann_s = aperture_photometry(
data, SkyEllipticalAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec,
3.0001 * u.arcsec, theta=45 * u.arcsec),
wcs=wcs)
assert_allclose(photometry_skycoord_ell['aperture_sum'][2],
photometry_skycoord_ell_s['aperture_sum'])
assert_allclose(photometry_skycoord_ell_ann['aperture_sum'][2],
photometry_skycoord_ell_ann_s['aperture_sum'])
assert_allclose(photometry_skycoord_ell['aperture_sum'],
photometry_skycoord_circ['aperture_sum'], rtol=5e-3)
assert_allclose(photometry_skycoord_ell_ann['aperture_sum'],
photometry_skycoord_ell['aperture_sum'] -
photometry_skycoord_ell_2['aperture_sum'], rtol=1e-4)
photometry_skycoord_rec = aperture_photometry(
data, SkyRectangularAperture(pos_skycoord,
6 * u.arcsec, 6 * u.arcsec,
0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_4 = aperture_photometry(
data, SkyRectangularAperture(pos_skycoord,
4 * u.arcsec, 4 * u.arcsec,
0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_s = aperture_photometry(
data, SkyRectangularAperture(pos_skycoord_s,
6 * u.arcsec, 6 * u.arcsec,
0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_ann = aperture_photometry(
data, SkyRectangularAnnulus(pos_skycoord, 4 * u.arcsec, 6 * u.arcsec,
6 * u.arcsec, theta=0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
photometry_skycoord_rec_ann_s = aperture_photometry(
data, SkyRectangularAnnulus(pos_skycoord_s, 4 * u.arcsec,
6 * u.arcsec, 6 * u.arcsec,
theta=0 * u.arcsec),
method='subpixel', subpixels=20, wcs=wcs)
assert_allclose(photometry_skycoord_rec['aperture_sum'][2],
photometry_skycoord_rec_s['aperture_sum'])
assert np.all(photometry_skycoord_rec['aperture_sum'] >
photometry_skycoord_circ['aperture_sum'])
assert_allclose(photometry_skycoord_rec_ann['aperture_sum'][2],
photometry_skycoord_rec_ann_s['aperture_sum'])
assert_allclose(photometry_skycoord_rec_ann['aperture_sum'],
photometry_skycoord_rec['aperture_sum'] -
photometry_skycoord_rec_4['aperture_sum'], rtol=1e-4)
def test_basic_circular_aperture_photometry_unit():
radius = 3
true_flux = np.pi * radius * radius
aper = CircularAperture((12, 12), radius)
data1 = np.ones((25, 25), dtype=float)
table1 = aperture_photometry(data1, aper)
assert_allclose(table1['aperture_sum'], true_flux)
unit = u.adu
data2 = u.Quantity(data1 * unit)
table2 = aperture_photometry(data2, aper)
assert_allclose(table2['aperture_sum'].value, true_flux)
assert table2['aperture_sum'].unit == data2.unit == unit
error1 = np.ones((25, 25))
with pytest.raises(ValueError):
# data has unit, but error does not
aperture_photometry(data2, aper, error=error1)
error2 = u.Quantity(error1 * u.Jy)
with pytest.raises(ValueError):
# data and error have different units
aperture_photometry(data2, aper, error=error2)
def test_aperture_photometry_with_error_units():
"""Test aperture_photometry when error has units (see #176)."""
data1 = np.ones((40, 40), dtype=float)
data2 = u.Quantity(data1, unit=u.adu)
error = u.Quantity(data1, unit=u.adu)
radius = 3
true_flux = np.pi * radius * radius
unit = u.adu
position = (20, 20)
table1 = aperture_photometry(data2, CircularAperture(position, radius),
error=error)
assert_allclose(table1['aperture_sum'].value, true_flux)
assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux))
assert table1['aperture_sum'].unit == unit
assert table1['aperture_sum_err'].unit == unit
def test_aperture_photometry_inputs_with_mask():
"""
Test that aperture_photometry does not modify the input
data or error array when a mask is input.
"""
data = np.ones((5, 5))
aperture = CircularAperture((2, 2), 2.)
mask = np.zeros_like(data, dtype=bool)
data[2, 2] = 100. # bad pixel
mask[2, 2] = True
error = np.sqrt(data)
data_in = data.copy()
error_in = error.copy()
t1 = aperture_photometry(data, aperture, error=error, mask=mask)
assert_array_equal(data, data_in)
assert_array_equal(error, error_in)
assert_allclose(t1['aperture_sum'][0], 11.5663706144)
t2 = aperture_photometry(data, aperture)
assert_allclose(t2['aperture_sum'][0], 111.566370614)
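    # For reference: the exact aperture area is pi * 2**2 ~= 12.566; masking the
    # bad pixel removes one unit (12.566 - 1 ~= 11.566), while leaving the
    # 100-valued pixel in adds 99 more (~111.566).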
TEST_ELLIPSE_EXACT_APERTURES = [(3.469906, 3.923861394, 3.),
(0.3834415188257778, 0.3834415188257778, 0.3)]
@pytest.mark.parametrize('x,y,r', TEST_ELLIPSE_EXACT_APERTURES)
def test_ellipse_exact_grid(x, y, r):
"""
Test elliptical exact aperture photometry on a grid of pixel positions.
This is a regression test for the bug discovered in this issue:
https://github.com/astropy/photutils/issues/198
"""
data = np.ones((10, 10))
aperture = EllipticalAperture((x, y), r, r, 0.)
t = aperture_photometry(data, aperture, method='exact')
actual = t['aperture_sum'][0] / (np.pi * r ** 2)
assert_allclose(actual, 1)
@pytest.mark.parametrize('value', [np.nan, np.inf])
def test_nan_inf_mask(value):
"""Test that nans and infs are properly masked [267]."""
data = np.ones((9, 9))
mask = np.zeros_like(data, dtype=bool)
data[4, 4] = value
mask[4, 4] = True
radius = 2.
aper = CircularAperture((4, 4), radius)
tbl = aperture_photometry(data, aper, mask=mask)
desired = (np.pi * radius**2) - 1
assert_allclose(tbl['aperture_sum'], desired)
def test_aperture_partial_overlap():
data = np.ones((20, 20))
error = np.ones((20, 20))
xypos = [(10, 10), (0, 0), (0, 19), (19, 0), (19, 19)]
r = 5.
aper = CircularAperture(xypos, r=r)
tbl = aperture_photometry(data, aper, error=error)
assert_allclose(tbl['aperture_sum'][0], np.pi * r ** 2)
assert_array_less(tbl['aperture_sum'][1:], np.pi * r ** 2)
unit = u.MJy / u.sr
tbl = aperture_photometry(data * unit, aper, error=error * unit)
assert_allclose(tbl['aperture_sum'][0].value, np.pi * r ** 2)
assert_array_less(tbl['aperture_sum'][1:].value, np.pi * r ** 2)
assert_array_less(tbl['aperture_sum_err'][1:].value, np.pi * r ** 2)
assert tbl['aperture_sum'].unit == unit
assert tbl['aperture_sum_err'].unit == unit
def test_pixel_aperture_repr():
aper = CircularAperture((10, 20), r=3.0)
assert '<CircularAperture(' in repr(aper)
assert 'Aperture: CircularAperture' in str(aper)
aper = CircularAnnulus((10, 20), r_in=3.0, r_out=5.0)
assert '<CircularAnnulus(' in repr(aper)
assert 'Aperture: CircularAnnulus' in str(aper)
aper = EllipticalAperture((10, 20), a=5.0, b=3.0, theta=15.0)
assert '<EllipticalAperture(' in repr(aper)
assert 'Aperture: EllipticalAperture' in str(aper)
aper = EllipticalAnnulus((10, 20), a_in=4.0, a_out=8.0, b_out=4.0,
theta=15.0)
assert '<EllipticalAnnulus(' in repr(aper)
assert 'Aperture: EllipticalAnnulus' in str(aper)
aper = RectangularAperture((10, 20), w=5.0, h=3.0, theta=15.0)
assert '<RectangularAperture(' in repr(aper)
assert 'Aperture: RectangularAperture' in str(aper)
aper = RectangularAnnulus((10, 20), w_in=4.0, w_out=8.0, h_out=4.0,
theta=15.0)
assert '<RectangularAnnulus(' in repr(aper)
assert 'Aperture: RectangularAnnulus' in str(aper)
def test_sky_aperture_repr():
s = SkyCoord([1, 2], [3, 4], unit='deg')
aper = SkyCircularAperture(s, r=3*u.deg)
a_repr = ('<SkyCircularAperture(<SkyCoord (ICRS): (ra, dec) in deg\n'
' [(1., 3.), (2., 4.)]>, r=3.0 deg)>')
a_str = ('Aperture: SkyCircularAperture\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'r: 3.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyCircularAnnulus(s, r_in=3.*u.deg, r_out=5*u.deg)
a_repr = ('<SkyCircularAnnulus(<SkyCoord (ICRS): (ra, dec) in deg\n'
' [(1., 3.), (2., 4.)]>, r_in=3.0 deg, r_out=5.0 deg)>')
a_str = ('Aperture: SkyCircularAnnulus\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'r_in: 3.0 deg\nr_out: 5.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyEllipticalAperture(s, a=3*u.deg, b=5*u.deg, theta=15*u.deg)
a_repr = ('<SkyEllipticalAperture(<SkyCoord (ICRS): (ra, dec) in '
'deg\n [(1., 3.), (2., 4.)]>, a=3.0 deg, b=5.0 deg, '
'theta=15.0 deg)>')
a_str = ('Aperture: SkyEllipticalAperture\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'a: 3.0 deg\nb: 5.0 deg\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyEllipticalAnnulus(s, a_in=3*u.deg, a_out=5*u.deg, b_out=3*u.deg,
theta=15*u.deg)
a_repr = ('<SkyEllipticalAnnulus(<SkyCoord (ICRS): (ra, dec) in '
'deg\n [(1., 3.), (2., 4.)]>, a_in=3.0 deg, '
'a_out=5.0 deg, b_in=1.8 deg, b_out=3.0 deg, '
'theta=15.0 deg)>')
a_str = ('Aperture: SkyEllipticalAnnulus\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'a_in: 3.0 deg\na_out: 5.0 deg\nb_in: 1.8 deg\n'
'b_out: 3.0 deg\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyRectangularAperture(s, w=3*u.deg, h=5*u.deg, theta=15*u.deg)
a_repr = ('<SkyRectangularAperture(<SkyCoord (ICRS): (ra, dec) in '
'deg\n [(1., 3.), (2., 4.)]>, w=3.0 deg, h=5.0 deg'
', theta=15.0 deg)>')
a_str = ('Aperture: SkyRectangularAperture\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'w: 3.0 deg\nh: 5.0 deg\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
aper = SkyRectangularAnnulus(s, w_in=5*u.deg, w_out=10*u.deg,
h_out=6*u.deg, theta=15*u.deg)
a_repr = ('<SkyRectangularAnnulus(<SkyCoord (ICRS): (ra, dec) in deg'
'\n [(1., 3.), (2., 4.)]>, w_in=5.0 deg, '
'w_out=10.0 deg, h_in=3.0 deg, h_out=6.0 deg, '
'theta=15.0 deg)>')
a_str = ('Aperture: SkyRectangularAnnulus\npositions: <SkyCoord '
'(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n'
'w_in: 5.0 deg\nw_out: 10.0 deg\nh_in: 3.0 deg\n'
'h_out: 6.0 deg\ntheta: 15.0 deg')
assert repr(aper) == a_repr
assert str(aper) == a_str
def test_rectangular_bbox():
# odd sizes
width = 7
height = 3
a = RectangularAperture((50, 50), w=width, h=height, theta=0)
assert a.bbox.shape == (height, width)
a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0)
assert a.bbox.shape == (height + 1, width + 1)
a = RectangularAperture((50, 50), w=width, h=height, theta=90.*np.pi/180.)
assert a.bbox.shape == (width, height)
# even sizes
width = 8
height = 4
a = RectangularAperture((50, 50), w=width, h=height, theta=0)
assert a.bbox.shape == (height + 1, width + 1)
a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0)
assert a.bbox.shape == (height, width)
a = RectangularAperture((50.5, 50.5), w=width, h=height,
theta=90.*np.pi/180.)
assert a.bbox.shape == (width, height)
def test_elliptical_bbox():
# integer axes
a = 7
b = 3
ap = EllipticalAperture((50, 50), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b + 1, 2*a + 1)
ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b, 2*a)
ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.)
assert ap.bbox.shape == (2*a + 1, 2*b + 1)
# fractional axes
a = 7.5
b = 4.5
ap = EllipticalAperture((50, 50), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b, 2*a)
ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0)
assert ap.bbox.shape == (2*b + 1, 2*a + 1)
ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.)
assert ap.bbox.shape == (2*a, 2*b)
@pytest.mark.skipif('not HAS_GWCS')
@pytest.mark.parametrize('wcs_type', ('wcs', 'gwcs'))
def test_to_sky_pixel(wcs_type):
data = make_4gaussians_image()
if wcs_type == 'wcs':
wcs = make_wcs(data.shape)
elif wcs_type == 'gwcs':
wcs = make_gwcs(data.shape)
ap = CircularAperture(((12.3, 15.7), (48.19, 98.14)), r=3.14)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.r, ap2.r)
ap = CircularAnnulus(((12.3, 15.7), (48.19, 98.14)), r_in=3.14,
r_out=5.32)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.r_in, ap2.r_in)
assert_allclose(ap.r_out, ap2.r_out)
ap = EllipticalAperture(((12.3, 15.7), (48.19, 98.14)), a=3.14, b=5.32,
theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.a, ap2.a)
assert_allclose(ap.b, ap2.b)
assert_allclose(ap.theta, ap2.theta)
ap = EllipticalAnnulus(((12.3, 15.7), (48.19, 98.14)), a_in=3.14,
a_out=15.32, b_out=4.89, theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.a_in, ap2.a_in)
assert_allclose(ap.a_out, ap2.a_out)
assert_allclose(ap.b_out, ap2.b_out)
assert_allclose(ap.theta, ap2.theta)
ap = RectangularAperture(((12.3, 15.7), (48.19, 98.14)), w=3.14, h=5.32,
theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.w, ap2.w)
assert_allclose(ap.h, ap2.h)
assert_allclose(ap.theta, ap2.theta)
ap = RectangularAnnulus(((12.3, 15.7), (48.19, 98.14)), w_in=3.14,
w_out=15.32, h_out=4.89, theta=103.*np.pi/180.)
ap2 = ap.to_sky(wcs).to_pixel(wcs)
assert_allclose(ap.positions, ap2.positions)
assert_allclose(ap.w_in, ap2.w_in)
assert_allclose(ap.w_out, ap2.w_out)
assert_allclose(ap.h_out, ap2.h_out)
assert_allclose(ap.theta, ap2.theta)
def test_position_units():
"""Regression test for unit check."""
pos = (10, 10) * u.pix
pos = np.sqrt(pos**2)
with pytest.warns(AstropyDeprecationWarning):
ap = CircularAperture(pos, r=3.)
assert_allclose(ap.positions, np.array([10, 10]))
def test_radius_units():
"""Regression test for unit check."""
pos = SkyCoord(10, 10, unit='deg')
r = 3.*u.pix
r = np.sqrt(r**2)
with pytest.warns(AstropyDeprecationWarning):
ap = SkyCircularAperture(pos, r=r)
assert ap.r.value == 3.0
assert ap.r.unit == u.pix
def test_scalar_aperture():
"""
Regression test to check that length-1 aperture list appends a "_0"
on the column names to be consistent with list inputs.
"""
data = np.ones((20, 20), dtype=float)
ap = CircularAperture((10, 10), r=3.)
colnames1 = aperture_photometry(data, ap, error=data).colnames
assert (colnames1 == ['id', 'xcenter', 'ycenter', 'aperture_sum',
'aperture_sum_err'])
colnames2 = aperture_photometry(data, [ap], error=data).colnames
assert (colnames2 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',
'aperture_sum_err_0'])
colnames3 = aperture_photometry(data, [ap, ap], error=data).colnames
assert (colnames3 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',
'aperture_sum_err_0', 'aperture_sum_1',
'aperture_sum_err_1'])
def test_nan_in_bbox():
"""
Regression test that non-finite data values outside of the aperture
mask but within the bounding box do not affect the photometry.
"""
data1 = np.ones((101, 101))
data2 = data1.copy()
data1[33, 33] = np.nan
data1[67, 67] = np.inf
data1[33, 67] = -np.inf
data1[22, 22] = np.nan
data1[22, 23] = np.inf
error = data1.copy()
aper1 = CircularAperture((50, 50), r=20.)
aper2 = CircularAperture((5, 5), r=20.)
tbl1 = aperture_photometry(data1, aper1, error=error)
tbl2 = aperture_photometry(data2, aper1, error=error)
    assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum'])
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File: sample_questions_mgn.py
# Author: anon
# Email: <EMAIL>
# Created on: 2020-05-19
#
# This file is part of MGN
# Distributed under terms of the MIT License
import os, sys, platform
import argparse
import h5py
import numpy as np
np.random.seed(42)
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from pyrr import quaternion
class test_quaternion(unittest.TestCase):
# many of these values are taken from searches on wolfram alpha
def test_import(self):
import pyrr
pyrr.quaternion
from pyrr import quaternion
def test_create(self):
result = quaternion.create()
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_parameters(self):
result = quaternion.create(1.0, 2.0, 3.0, 4.0)
np.testing.assert_almost_equal(result, [1.0, 2.0, 3.0, 4.0], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_from_x_rotation(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(np.pi)
self.assertTrue(np.allclose(q, [1., 0., 0., 0.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [np.sqrt(0.5), 0., 0., np.sqrt(0.5)]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(q, [-np.sqrt(0.5), 0., 0., np.sqrt(0.5)]))
def test_create_from_y_rotation(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(np.pi)
self.assertTrue(np.allclose(q, [0., 1., 0., 0.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [0., np.sqrt(0.5), 0., np.sqrt(0.5)]))
# -90 degree rotation around Y axis
        q = quaternion.create_from_y_rotation(-np.pi / 2.)
        self.assertTrue(np.allclose(q, [0., -np.sqrt(0.5), 0., np.sqrt(0.5)]))
def test_create_from_z_rotation(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(np.pi)
self.assertTrue(np.allclose(q, [0., 0., 1., 0.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [0., 0., np.sqrt(0.5), np.sqrt(0.5)]))
# -90 degree rotation around Z axis
        q = quaternion.create_from_z_rotation(-np.pi / 2.)
        self.assertTrue(np.allclose(q, [0., 0., -np.sqrt(0.5), np.sqrt(0.5)]))
def test_create_from_axis_rotation(self):
# wolfram alpha can be awesome sometimes
result = quaternion.create_from_axis_rotation([0.57735, 0.57735, 0.57735], np.pi)
np.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
        self.assertTrue(result.dtype == np.float64)
def test_create_from_axis_rotation_non_normalized(self):
result = quaternion.create_from_axis_rotation([1., 1., 1.], np.pi)
np.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
        self.assertTrue(result.dtype == np.float64)
def test_create_from_matrix_unit(self):
result = quaternion.create_from_matrix(np.eye(3))
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
        self.assertTrue(result.dtype == np.float64)
def test_create_from_matrix_x(self):
result = quaternion.create_from_matrix([
[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.],
])
np.testing.assert_almost_equal(result, [1., 0., 0., 0.], decimal=5)
        self.assertTrue(result.dtype == np.float64)
def test_create_from_matrix_y(self):
result = quaternion.create_from_matrix([
[-1., 0., 0.],
[0., 1., 0.],
[0., 0., -1.],
])
np.testing.assert_almost_equal(result, [0., 1., 0., 0.], decimal=5)
        self.assertTrue(result.dtype == np.float64)
def test_create_from_matrix_z(self):
result = quaternion.create_from_matrix([
[-1., 0., 0.],
[0., -1., 0.],
[0., 0., 1.],
])
np.testing.assert_almost_equal(result, [0., 0., 1., 0.], decimal=5)
        self.assertTrue(result.dtype == np.float64)
@unittest.skip('Not implemented')
def test_create_from_eulers(self):
pass
@unittest.skip('Not implemented')
def test_create_from_inverse_of_eulers(self):
pass
def test_cross(self):
q1 = quaternion.create_from_x_rotation(np.pi / 2.0)
q2 = quaternion.create_from_x_rotation(-np.pi / 2.0)
result = quaternion.cross(q1, q2)
np.testing.assert_almost_equal(result, quaternion.create(), decimal=5)
def test_quaternion_slerp(self):
sqrt2 = np.sqrt(2) / 2
identity = np.array([0.0, 0.0, 0.0, 1.0])
y90rot = np.array([0.0, sqrt2, 0.0, sqrt2])
y180rot = np.array([0.0, 1.0, 0.0, 0.0])
# Testing a == 0
# Must be id
result = quaternion.slerp(identity, y90rot, 0.0)
np.testing.assert_almost_equal(result, identity, decimal=4)
# Testing a == 1
# Must be 90° rotation on Y : 0 0.7 0 0.7
result = quaternion.slerp(identity, y90rot, 1.0)
np.testing.assert_almost_equal(result, y90rot, decimal=4)
# Testing standard, easy case
# Must be 45° rotation on Y : 0 0.38 0 0.92
y45rot1 = quaternion.slerp(identity, y90rot, 0.5)
# Testing reverse case
# Must be 45° rotation on Y : 0 0.38 0 0.92
y45rot2 = quaternion.slerp(y90rot, identity, 0.5)
np.testing.assert_almost_equal(y45rot1, y45rot2, decimal=4)
# Testing against full circle around the sphere instead of shortest path
# Must be 45° rotation on Y
# certainly not a 135° rotation
# y45rot3 = quaternion.slerp(identity, quaternion.negate(y90rot), 0.5)
y45rot3 = quaternion.slerp(identity, y90rot, 0.5)
y45angle3 = quaternion.rotation_angle(y45rot3)
np.testing.assert_almost_equal(y45angle3 * 180 / np.pi, 45, decimal=4)
np.testing.assert_almost_equal(y45angle3, np.pi / 4, decimal=4)
# # Same, but inverted
# # Must also be 45° rotation on Y : 0 0.38 0 0.92
# # -0 -0.38 -0 -0.92 is ok too
y45rot4 = quaternion.slerp(-y90rot, identity, 0.5)
np.testing.assert_almost_equal(np.abs(y45rot4), y45rot2, decimal=4)
# # Testing q1 = q2
# # Must be 90° rotation on Y : 0 0.7 0 0.7
        y90rot3 = quaternion.slerp(y90rot, y90rot, 0.5)
np.testing.assert_almost_equal(y90rot3, y90rot, decimal=4)
# # Testing 180° rotation
# # Must be 90° rotation on almost any axis that is on the XZ plane
xz90rot = quaternion.slerp(identity, -y90rot, 0.5)
xz90rot = quaternion.rotation_angle(xz90rot)
np.testing.assert_almost_equal(xz90rot, np.pi / 4, decimal=4)
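    def test_quaternion_slerp_closed_form(self):
        # Added sketch (not part of the original suite): slerp from the
        # identity to a 90 degree Y rotation at t=0.5 should equal the
        # 45 degree Y rotation built directly from the axis-angle formula.
        identity = np.array([0.0, 0.0, 0.0, 1.0])
        y90rot = np.array([0.0, np.sqrt(0.5), 0.0, np.sqrt(0.5)])
        expected = quaternion.create_from_y_rotation(np.pi / 4.)
        result = quaternion.slerp(identity, y90rot, 0.5)
        np.testing.assert_almost_equal(result, expected, decimal=4)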
def test_is_zero_length(self):
result = quaternion.is_zero_length([1., 0., 0., 0.])
self.assertFalse(result)
def test_is_zero_length_zero(self):
result = quaternion.is_zero_length([0., 0., 0., 0.])
self.assertTrue(result)
def test_is_non_zero_length(self):
result = quaternion.is_non_zero_length([1., 0., 0., 0.])
self.assertTrue(result)
def test_is_non_zero_length_zero(self):
result = quaternion.is_non_zero_length([0., 0., 0., 0.])
self.assertFalse(result)
def test_squared_length_identity(self):
result = quaternion.squared_length([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, 1., decimal=5)
def test_squared_length(self):
result = quaternion.squared_length([1., 1., 1., 1.])
np.testing.assert_almost_equal(result, 4., decimal=5)
def test_squared_length_batch(self):
result = quaternion.squared_length([
[0., 0., 0., 1.],
[1., 1., 1., 1.],
])
np.testing.assert_almost_equal(result, [1., 4.], decimal=5)
def test_length_identity(self):
result = quaternion.length([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, 1., decimal=5)
def test_length(self):
result = quaternion.length([1., 1., 1., 1.])
np.testing.assert_almost_equal(result, 2., decimal=5)
def test_length_batch(self):
result = quaternion.length([
[0., 0., 0., 1.],
[1., 1., 1., 1.],
])
np.testing.assert_almost_equal(result, [1., 2.], decimal=5)
def test_normalize_identity(self):
# normalize an identity quaternion
result = quaternion.normalize([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_normalize_non_identity(self):
# normalize an identity quaternion
result = quaternion.normalize([1., 2., 3., 4.])
np.testing.assert_almost_equal(result, [1. / np.sqrt(30.), np.sqrt(2. / 15.), np.sqrt(3. / 10.), 2. * np.sqrt(2. / 15.)], decimal=5)
def test_normalize_batch(self):
# normalize an identity quaternion
result = quaternion.normalize([
[0., 0., 0., 1.],
[1., 2., 3., 4.],
])
expected = [
[0., 0., 0., 1.],
[1. / np.sqrt(30.), np.sqrt(2. / 15.), np.sqrt(3. / 10.), 2. * np.sqrt(2. / 15.)],
]
np.testing.assert_almost_equal(result, expected, decimal=5)
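    def test_normalize_idempotent(self):
        # Added sketch (not part of the original suite): normalizing an
        # already-normalized quaternion is a no-op and yields unit length.
        q = quaternion.normalize([1., 2., 3., 4.])
        np.testing.assert_almost_equal(quaternion.normalize(q), q, decimal=5)
        np.testing.assert_almost_equal(quaternion.length(q), 1., decimal=5)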
def test_rotation_angle(self):
result = quaternion.rotation_angle([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, np.pi, decimal=5)
def test_rotation_axis(self):
result = quaternion.rotation_axis([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, [0.57735, 0.57735, 0.57735], decimal=5)
def test_dot_adjacent(self):
result = quaternion.dot([1., 0., 0., 0.], [0., 1., 0., 0.])
np.testing.assert_almost_equal(result, 0.0, decimal=5)
def test_dot_parallel(self):
result = quaternion.dot([0., 1., 0., 0.], [0., 1., 0., 0.])
np.testing.assert_almost_equal(result, 1.0, decimal=5)
def test_dot_angle(self):
result = quaternion.dot([.2, .2, 0., 0.], [2., -.2, 0., 0.])
np.testing.assert_almost_equal(result, 0.36, decimal=5)
def test_dot_batch(self):
result = quaternion.dot([
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[.2, .2, 0., 0.]
], [
[0., 1., 0., 0.],
[0., 1., 0., 0.],
[2., -.2, 0., 0.]
])
expected = [0., 1., 0.36]
np.testing.assert_almost_equal(result, expected, decimal=5)
def test_conjugate(self):
#result = quaternion.conjugate([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
result = quaternion.conjugate([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_conjugate_rotation(self):
result = quaternion.conjugate([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, [-0.57735, -0.57735, -0.57735, 6.12323e-17], decimal=5)
@unittest.skip('Not implemented')
def test_power(self):
pass
def test_inverse(self):
result = quaternion.inverse([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_inverse_rotation(self):
result = quaternion.inverse([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, [-0.577351, -0.577351, -0.577351, 6.12324e-17], decimal=5)
def test_inverse_non_unit(self):
q = [1, 2, 3, 4]
result = quaternion.inverse(q)
expected = quaternion.conjugate(q) / quaternion.length(q)
np.testing.assert_almost_equal(result, expected, decimal=5)
def test_negate_unit(self):
result = quaternion.negate([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., -1.], decimal=5)
def test_negate(self):
result = quaternion.negate([1., 2., 3., 4.])
np.testing.assert_almost_equal(result, [-1., -2., -3., -4.], decimal=5)
def test_apply_to_vector_unit_x(self):
result = quaternion.apply_to_vector([0., 0., 0., 1.], [1., 0., 0.])
np.testing.assert_almost_equal(result, [1., 0., 0.], decimal=5)
def test_apply_to_vector_x(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(np.pi)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0.,-1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0.,-1.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 0., 1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0.,-1., 0.]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 0.,-1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 1., 0.]))
def test_apply_to_vector_y(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(np.pi)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [-1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0.,-1.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 0.,-1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [1., 0., 0.]))
# -90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 0., 1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [-1., 0., 0.]))
def test_apply_to_vector_z(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(np.pi)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [-1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0.,-1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [-1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
# -90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0.,-1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
def test_apply_to_vector_non_unit(self):
q = quaternion.create_from_x_rotation(np.pi)
# zero length
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 0.]), [0., 0., 0.]))
# >1 length
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [2., 0., 0.]), [2., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 2., 0.]), [0.,-2., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 2.]), [0., 0.,-2.]))
def test_identity(self):
# https://en.wikipedia.org/wiki/Quaternion
i = quaternion.create(1., 0., 0., 0.)
j = quaternion.create(0., 1., 0., 0.)
k = quaternion.create(0., 0., 1., 0.)
one = quaternion.create(0., 0., 0., 1.)
# i * 1 = i
# j * 1 = j
# k * 1 = k
# 1 * i = i
# 1 * j = j
# 1 * k = k
i1 = quaternion.cross(i, one)
j1 = quaternion.cross(j, one)
k1 = quaternion.cross(k, one)
_1i = quaternion.cross(one, i)
_1j = quaternion.cross(one, j)
_1k = quaternion.cross(one, k)
        self.assertTrue(np.allclose(i1, i) and np.allclose(_1i, i))
        self.assertTrue(np.allclose(j1, j) and np.allclose(_1j, j))
        self.assertTrue(np.allclose(k1, k) and np.allclose(_1k, k))
# result = -1
ii = quaternion.cross(i, i)
kk = quaternion.cross(k, k)
jj = quaternion.cross(j, j)
ijk = quaternion.cross(quaternion.cross(i, j), k)
self.assertTrue(np.allclose(ii, -one))
self.assertTrue(np.allclose(jj, -one))
        self.assertTrue(np.allclose(kk, -one))
        self.assertTrue(np.allclose(ijk, -one))
import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
def write_log(callback, names, logs, batch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
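# Usage sketch (added; assumes a TF1-style keras.callbacks.TensorBoard
# instance `tb` whose `writer` is already open):
# >>> write_log(tb, ['rpn_loss', 'roi_loss'], [0.42, 0.17], batch_no=3)
# Each (name, value) pair is appended as a scalar summary that TensorBoard
# plots against `batch_no`.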
def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper):
total_loss = 0
rpn_loc_loss = 0
rpn_cls_loss = 0
roi_loc_loss = 0
roi_cls_loss = 0
val_loss = 0
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
            loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [np.array(out_classes), np.array(out_regrs)])
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
import math as m
from numpy.linalg import inv
from func_dh_table import Tmat
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
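# Usage sketch (added): draw one arrow from the origin to (1, 1, 1) on a 3D
# axis; `mutation_scale` and `arrowstyle` are ordinary FancyArrowPatch kwargs.
# >>> fig = plt.figure(); ax = fig.add_subplot(111, projection='3d')
# >>> ax.add_artist(Arrow3D([0, 1], [0, 1], [0, 1], mutation_scale=20,
# ...                       arrowstyle='-|>', color='k'))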
def Rx(theta):
return np.array([[1, 0, 0, 0],
[0, np.cos(theta), -np.sin(theta), 0],
[0, np.sin(theta), np.cos(theta), 0],
[0, 0, 0, 1]])
def Rz(theta):
return np.array([[np.cos(theta), -np.sin(theta), 0, 0],
                     [np.sin(theta), np.cos(theta), 0, 0],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]])
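# Worked example (added): a 90 degree rotation about z maps the homogeneous
# point (1, 0, 0, 1) onto the y axis, i.e.
# Rz(np.pi / 2).dot([1., 0., 0., 1.]) is approximately [0., 1., 0., 1.]
# (the x entry is ~6e-17 of floating-point round-off).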
from __future__ import division, print_function, absolute_import
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from .filters import *
from .helpers import fminbound, fmin, newton
from mpmath import ellipk, ellipfun, besselk
import numpy as np
__all__ = ['iirfilter', 'cheb1ord', 'cheb2ord', 'buttord']
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
# elif typefunc == besselap:
# z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
# elif typefunc == ellipap:
# if rs is None or rp is None:
# raise ValueError("Both rp and rs must be provided to design an "
# "elliptic filter.")
# z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
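# Worked note (added): for N = 4, m = [-3, -1, 1, 3], so the poles are
# -exp(1j*pi*m/8): four points on the unit circle, all strictly in the left
# half-plane, which is the classic Butterworth pole geometry.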
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2.0*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
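# Worked example (added): with fs = 2 (so fs2 = 4), an analog pole at s = -1
# maps to z = (4 + (-1)) / (4 - (-1)) = 0.6, inside the unit circle, so a
# stable analog filter stays stable after the transform.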
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
    p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
                        p_lp - sqrt(p_lp**2 - wo**2)))
    # Move degree zeros to origin, leaving degree zeros at infinity for BPF
    z_bp = append(z_bp, zeros(degree))
    # Cancel out gain change from frequency scaling
    k_bp = k * bw**degree
    return z_bp, p_bp, k_bp
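# Worked note (added): each baseband pole p_lp splits into the pair
# p_lp +/- sqrt(p_lp**2 - wo**2), so an order-N lowpass prototype becomes an
# order-2N bandpass filter, with the two pole groups straddling +wo and -wo.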
import numpy as np
from dwave.system.samplers import DWaveSampler
# Numpy arrays should be row-major for best performance
class TrainEnv:
# require for the data to be passed as NP data arrays
# -> clear to both user and code what each array is
# X_train, y_train should be formatted like scikit-learn data arrays are
# -> X_train is the train data, y_train is the train labels
def __init__(
self,
X_train,
y_train,
endpoint_url,
account_token,
X_val=None,
y_val=None,
fidelity=7,
dwave_topology="pegasus",
):
"""
Configures the hyperparameters for the model. In essence, this controls how
the model learns.
Parameters:
- X_train Input Events x Params dataset (given in Scikit-learn's format).
- y_train Input Classification dataset (given in Scikit-learn's format).
- endpoint_url The url associated with the D-Wave machine desired.
- account_token Access token for D-Wave machines associated with your account.
- X_val (Optional) Validation Events x Params dataset
(given in Scikit-learn's format).
- y_val (Optional) Validation Classification dataset
(given in Scikit-learn's format).
- fidelity (Optional) Number of copies of parameter to make for zooming.
- dwave_topology (Optional) Architecture of the desired D-Wave machine.
(Possible options defined in D-Wave documentation.)
Environment Vars:
- X_train Dataset of Events x Params used solely for training.
        - y_train Dataset of Classifications used solely for training.
- X_val Dataset of Events x Params used solely for validation.
        - y_val Dataset of Classifications used solely for validation.
- train_size Number of events to train on (or per group if dataset is split).
- fidelity Number of copies of each parameter to make. The greater the fidelity,
the more complex of a decision boundary that could be formed.
- fidelity_offset Amount to shift each copy of a param. Should generally not be changed.
- c_i Input dataset after the param copies have been created and shifted.
- C_i c_i dotted with y_train row-wise.
- C_ij c_i dotted with itself row-wise.
- sampler Defines the characteristics for the desired D-Wave machine. Can be changed
after understanding the Ocean SDK.
"""
self.X_train = X_train
self.y_train = y_train
# if X_val is None:
# self.create_val_data()
# else:
# self.X_val = X_val
# self.y_val = y_val
self.train_size = np.shape(self.X_train)[0]
self.fidelity = fidelity
self.fidelity_offset = 0.0225 / fidelity
self.c_i = None
self.C_i = None
self.C_ij = None
self.data_preprocess()
self.sampler = DWaveSampler(
endpoint=endpoint_url,
token=account_token,
solver=dict(topology__type=dwave_topology),
auto_scale=True,
) # auto_scale set True by default
def create_val_data(self):
"""
Takes a small portion of the training data for validation (useful for
comparing performance of error-correction schemes).
"""
dummy_xt, dummy_xv = np.split(
self.X_train, [int(8 * np.size(self.X_train, 0) / 10)], 0
)
dummy_yt, dummy_yv = np.split(
self.y_train, [int(8 * np.size(self.y_train) / 10)]
)
self.X_train, self.X_val = np.array(list(dummy_xt)), np.array(list(dummy_xv))
self.y_train, self.y_val = np.array(list(dummy_yt)), np.array(list(dummy_yv))
def data_preprocess(self):
"""
This duplicates the parameters 'fidelity' times. The purpose is to turn the weak classifiers
        from outputting a single number (-1 or 1) to outputting a binary array ([-1, 1, 1,...]). The
        use of such a change is to trick the math into allowing more nuance between a weak classifier
        that outputs 0.1 and one that outputs 0.9 (the weak classifier outputs are continuous)
-> thereby discretizing the weak classifier's decision into more pieces than binary.
This then creates a periodic array to shift the outputs of the repeated weak classifier, so that there
is a meaning to duplicating them. You can think of each successive digit of the resulting weak classifier
output array as being more specific about what the continuous output was - ie >0, >0.1, >0.2 etc. This
description is not exactly correct in this case but it is the same idea as what we're doing.
"""
m_events, n_params = np.shape(
self.X_train
) # [M events (rows) x N parameters (columns)]
c_i = np.repeat(
self.X_train, repeats=self.fidelity, axis=1
) # [M events (rows) x N*fidelity parameters (columns)]
offset_array = self.fidelity_offset * (
            np.tile(np.arange(self.fidelity), n_params)
        )
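        # Illustrative sketch (added, assuming the tile over n_params
        # reconstructed above): with fidelity=3 and n_params=2 the offsets
        # are fidelity_offset * [0, 1, 2, 0, 1, 2], so the three copies of
        # each parameter are shifted by increasing amounts before the
        # continuous weak-classifier outputs are discretized into +/-1 votes.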
""" Module to run I/O tests on XSpectrum1D
"""
from __future__ import print_function, absolute_import, \
division, unicode_literals
import numpy as np
import os
import pytest
import astropy.io.ascii as ascii
from astropy import units as u
import astropy.table
from linetools.spectra import io
from linetools.spectra.xspectrum1d import XSpectrum1D
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
def test_from_tuple():
tmp = ascii.read(data_path('UM184.dat.gz'), names=['wave', 'flux', 'sig'])
idl = dict(wave=np.array(tmp['wave']), flux=np.array(tmp['flux']),
sig=np.array(tmp['sig']))
spec = XSpectrum1D.from_tuple((idl['wave'],idl['flux'], idl['sig']))
#
np.testing.assert_allclose(spec.data['wave'][spec.select], idl['wave'])
np.testing.assert_allclose(spec.data['sig'][spec.select], idl['sig'], atol=2e-3, rtol=0)
assert spec.wavelength.unit == u.Unit('AA')
#
spec = XSpectrum1D.from_tuple((idl['wave'],idl['flux']))
np.testing.assert_allclose(spec.data['wave'][spec.select], idl['wave'])
# continuum
co = np.ones_like(idl['flux'])
spec = XSpectrum1D.from_tuple((idl['wave'],idl['flux'],idl['sig'], co))
np.testing.assert_allclose(spec.data['wave'][spec.select], idl['wave'])
co = None
spec = XSpectrum1D.from_tuple((idl['wave'],idl['flux'],idl['sig'], co))
    np.testing.assert_allclose(spec.data['wave'][spec.select], idl['wave'])
# This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import numpy as np
import pandas as pd
import torchvision.transforms as transforms
import datasets.additional_transforms as add_transforms
from torch.utils.data import Dataset, DataLoader
from abc import abstractmethod
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import sys
sys.path.append("../")
from configs import *
idx = 0
class CustomDatasetFromImages(Dataset):
def __init__(self, csv_path= ISIC_path + "/ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv", \
image_path = ISIC_path + "/ISIC2018_Task3_Training_Input/"):
print(csv_path)
"""
Args:
csv_path (string): path to csv file
img_path (string): path to the folder where images are
transform: pytorch transforms for transforms and tensor conversion
"""
self.img_path = image_path
self.csv_path = csv_path
# Transforms
self.to_tensor = transforms.ToTensor()
# Read the csv file
self.data_info = pd.read_csv(csv_path, skiprows=[0], header=None)
# First column contains the image paths
self.image_name = np.asarray(self.data_info.iloc[:, 0])
self.labels = | np.asarray(self.data_info.iloc[:, 1:]) | numpy.asarray |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Imports
########################################
import sys, os
SciAnalysis_PATH='/home/kyager/current/code/SciAnalysis/main/'
SciAnalysis_PATH in sys.path or sys.path.append(SciAnalysis_PATH)
import glob
import numpy as np
from SciAnalysis import tools
from SciAnalysis.XSAnalysis.Data import *
#from SciAnalysis.XSAnalysis import Protocols
# Select file/conditions
root_dir = '/home/kyager/ppl/EmilyCranston/Kevin_DeFrance/2015_10Oct_18-SAXS_all/'
source_dir = os.path.join(root_dir, 'CNC_825_1p2T_kin/')
#source_dir = os.path.join(root_dir, 'CNC_825_0p56T_kin/')
#source_dir = os.path.join(root_dir, 'CNC_825_0T_kin/')
#source_dir = os.path.join(root_dir, 'CNC_413_1p2T_kin/')
#source_dir = os.path.join(root_dir, 'CNC_165_1p2T_kin/')
#source_dir = os.path.join(root_dir, 'CNC_165_0T_kin/')
#source_dir = os.path.join(root_dir, 'CNC_1p65_0p56T_1_kin/')
results_dir = os.path.join(source_dir, 'SciAnalysis/', 'results/')
output_dir = os.path.join(source_dir, 'SciAnalysis', 'trend/')
t_initial = 3 # min
t_step = 1 # min
#show = ['I', 'q', 'd', 'sigma', 'xi', 'eta', 'm', 'S'] # all
show = ['I', 'd', 'xi', 'eta', 'S'] # most useful
#show = ['q', 'sigma', 'm'] # aux
# Custom plot
class DataLinesStacked_current(DataLinesStacked):
def analyze(self, **run_args):
self.fit_lines = []
self.fit_results = []
for i, line in enumerate(self.lines):
lm_result, fit_line, fit_line_extended = self.fit_sigmoid(line, **run_args)
self.fit_lines.append(fit_line_extended)
result = [lm_result.params['x_center'].value,
lm_result.params['x_center'].stderr,
lm_result.params['x_scale'].value,
lm_result.params['x_scale'].stderr,
lm_result.params['b'].value,
lm_result.params['b'].stderr,
lm_result.params['prefactor'].value,
lm_result.params['prefactor'].stderr,
]
self.fit_results.append(result)
print( line.y_label )
tau1 = lm_result.params['x_center'].value
tau2 = np.abs(lm_result.params['x_scale'].value)
tau = lm_result.params['x_center'].value+np.abs(lm_result.params['x_scale'].value)
tauerr = np.sqrt( np.square(lm_result.params['x_center'].stderr) + np.square(lm_result.params['x_scale'].stderr) )
print( ' tau1 = {:.2f} min tau2 = {:.2f} min'.format(tau1, tau2) )
print( ' tau = {:.2f} +/- {:.2f} min'.format(tau, tauerr) )
If = lm_result.params['b'].value+lm_result.params['prefactor'].value
Iferr = np.sqrt( np.square(lm_result.params['b'].stderr) + np.square(lm_result.params['prefactor'].stderr) )
print( ' I_i, I,f = {:.3f}+/-{:.3f}, {:.3f}+/-{:.3f}'.format(lm_result.params['b'].value, lm_result.params['b'].stderr, If, Iferr ) )
print( ' <I>+/- = {} +/- {}'.format(np.average(line.y), np.std(line.y)) )
def fit_sigmoid(self, line, **run_args):
import lmfit
def model(v, x):
'''Eta orientation function.'''
m = v['b']
m += v['prefactor']*( 1./(1 + np.exp(-(x-v['x_center'])/v['x_scale'])) )
return m
def func2minimize(params, x, data):
v = params.valuesdict()
m = model(v, x)
return m - data
x_span = np.max(line.x)-np.min(line.x)
# Determine 'polarity'
line.sort_x()
y_start = np.average(line.y[:5])
y_end = np.average(line.y[-5:])
if y_start>y_end:
polarity = -1
else:
polarity = +1
params = lmfit.Parameters()
params.add('b', value=np.min(line.y), min=0, max=np.max(line.y))
params.add('prefactor', value=np.max(line.y)-np.min(line.y), min=0)
params.add('x_center', value=x_span, min=np.min(line.x), max=np.max(line.x))
params.add('x_scale', value=x_span*0.25*polarity)
lm_result = lmfit.minimize(func2minimize, params, args=(line.x, line.y))
if run_args['verbosity']>=5:
print('Fit results (lmfit):')
lmfit.report_fit(lm_result.params)
fit_x = line.x
fit_y = model(lm_result.params.valuesdict(), fit_x)
fit_line = DataLine(x=fit_x, y=fit_y, plot_args={'linestyle':'-', 'color':'r', 'marker':None, 'linewidth':4.0})
fit_x = np.linspace(min(np.min(line.x), 0), np.max(line.x)*2.0, num=1000)
fit_y = model(lm_result.params.valuesdict(), fit_x)
fit_line_extended = DataLine(x=fit_x, y=fit_y, plot_args={'linestyle':'-', 'color':'r', 'marker':None, 'linewidth':4.0})
return lm_result, fit_line, fit_line_extended
def _plot_extra(self, **plot_args):
if hasattr(self, 'fit_lines') and self.fit_lines is not None:
for i, fline in enumerate(self.fit_lines):
ax = getattr(self, 'ax{}'.format(i+1))
xi, xf, yi, yf = ax.axis()
p_args = dict([(i, fline.plot_args[i]) for i in self.plot_valid_keys if i in fline.plot_args])
p_args['color'] = 'b'
p_args['linewidth'] = 3.0
ax.plot(fline.x, fline.y, **p_args)
result = self.fit_results[i]
xpos = result[0]
xerr = result[1]
ax.axvline(xpos, color='b', linewidth=1.0)
                ax.text(xpos, yf, r'${:.0f}\pm{:.0f} \, \mathrm{{min}}$'.format(xpos, xerr), verticalalignment='top', horizontalalignment='left', color='b')
xpos = result[2] + xpos
xerr = result[3]
ax.axvline(xpos, color='purple', linewidth=1.0)
                ax.text(xpos, yi, r'${:.0f}\pm{:.0f} \, \mathrm{{min}}$'.format(xpos, xerr), verticalalignment='bottom', horizontalalignment='left', color='purple')
xpos = result[0] + result[2]*0.5
                ax.text(xpos, yi+0.25*(yf-yi), r'$+{:.0f} \, \mathrm{{min}}$'.format(result[2]), verticalalignment='center', horizontalalignment='center', color='purple')
for i, line in enumerate(self.lines):
ax = getattr(self, 'ax{}'.format(i+1))
xi, xf, yi, yf = ax.axis()
if line.y_label=='m':
ax.axis( [xi, xf, 0, yf] )
elif line.y_label=='eta':
ax.axis( [xi, xf, 0, yf] )
elif line.y_label=='S':
ax.axis( [xi, xf, 0, 1] )
#yticks = ax.get_yticks() # Positions
yticks = ax.yaxis.get_major_ticks() # Objects
yticks[0].label1.set_visible(False) # First
#yticks[-1].label1.set_visible(False) # Last
def load_data(infile):
# Extract data
import re
    filename_re = re.compile(r'^.+_(\d+)\.xml$')
data = []
with open(infile) as fin:
for i, line in enumerate(fin.readlines()):
            if i==0 and line[0]=='#':
                headers = line[1:].split()
                continue
els = line.split()
m = filename_re.match(els[0].strip())
if m:
exposure_id = int(m.groups()[0])
time = t_initial + (exposure_id-1)*t_step
els[0] = time
els = [float(el) for el in els]
data.append(els)
return headers, | np.asarray(data) | numpy.asarray |
"""
SARIMAX Model
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from statsmodels.compat.python import long
from warnings import warn
import numpy as np
from .initialization import Initialization
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate, solve_discrete_lyapunov,
prepare_exog
)
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ValueWarning
import statsmodels.base.wrapper as wrap
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
`p` and `q` may either be an integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
        `D` must be an integer indicating the integration order of the process,
        while `P` and `Q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
        Used when exogenous variables, `exog`, are provided, to select
        whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
        Whether or not to estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
which discards the first :math:`s D + d` initial rows but results in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
        Order of integration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
seasonal_periods : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
        Order of seasonal integration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
    is not able to accommodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and <NAME>. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.seasonal_periods = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], (int, long, np.integer)):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], (int, long, np.integer)):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], (int, long, np.integer)):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.seasonal_periods * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
tmp = (i + 1) * self.seasonal_periods
self.polynomial_seasonal_ar[tmp] = seasonal_order[0][i]
if isinstance(seasonal_order[2], (int, long, np.integer)):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.seasonal_periods * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
tmp = (i + 1) * self.seasonal_periods
self.polynomial_seasonal_ma[tmp] = seasonal_order[2][i]
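        # Worked example (added): seasonal_order = (1, 0, 0, 12) yields
        # polynomial_seasonal_ar = [1, 0, ..., 0, 1] of length 13, i.e. the
        # seasonal lag polynomial 1 + L**12 with nonzero coefficients only at
        # lags 0 and 12; the seasonal MA polynomial is built the same way.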
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
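        # e.g. (added note): trend='ct' gives polynomial_trend = [1, 1], the
        # deterministic component a + b*t, while an iterable such as
        # [1, 0, 1] would denote a + c*t**2 (lowest degree first).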
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
# Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and
# q = k_ma_params = k_ma - 1, although this may not be true for models
# with arbitrary lag polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
# Handle time-varying regression
if self.time_varying_regression:
self._k_order = 0
# Exogenous data
(self.k_exog, exog) = prepare_exog(exog)
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
# State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += (self.seasonal_periods * self._k_seasonal_diff +
self._k_diff)
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
# Diffuse initialization can be more sensitive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
self.k_seasonal_ar_params + self.k_seasonal_ma_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if (self.simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.seasonal_periods * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Set as time-varying model if we have time-trend or exog
if self.k_exog > 0 or len(self.polynomial_trend) > 1:
self.ssm._time_invariant = False
# Initialize the fixed components of the statespace model
self.ssm['design'] = self.initial_design
self.ssm['state_intercept'] = self.initial_state_intercept
self.ssm['transition'] = self.initial_transition
self.ssm['selection'] = self.initial_selection
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
# TODO: I think the kwargs are not attached; need to recover them
# Initialize the state
if self.ssm.initialization is None:
self.initialize_default()
def _get_init_kwds(self):
kwds = super(SARIMAX, self)._get_init_kwds()
for key, value in kwds.items():
if value is None and hasattr(self.ssm, key):
kwds[key] = getattr(self.ssm, key)
return kwds
def prepare_data(self):
endog, exog = super(SARIMAX, self).prepare_data()
# Perform simple differencing if requested
if (self.simple_differencing and
(self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):
# Save the original length
orig_length = endog.shape[0]
# Perform simple differencing
endog = diff(endog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.seasonal_periods)
if exog is not None:
exog = diff(exog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.seasonal_periods)
# Reset the ModelData datasets and cache
self.data.endog, self.data.exog = (
self.data._convert_endog_exog(endog, exog))
# Reset indexes, if provided
new_length = self.data.endog.shape[0]
if self.data.row_labels is not None:
self.data._cache['row_labels'] = (
self.data.row_labels[orig_length - new_length:])
if self._index is not None:
if self._index_generated:
self._index = self._index[:-(orig_length - new_length)]
else:
self._index = self._index[orig_length - new_length:]
# Reset the nobs
self.nobs = endog.shape[0]
# Cache the arrays for calculating the intercept from the trend
# components
time_trend = np.arange(1, self.nobs + 1)
self._trend_data = np.zeros((self.nobs, self.k_trend))
i = 0
for k in self.polynomial_trend.nonzero()[0]:
if k == 0:
self._trend_data[:, i] = np.ones(self.nobs,)
else:
self._trend_data[:, i] = time_trend**k
i += 1
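# Example (illustrative): for trend='ct', polynomial_trend = [1, 1], so
# _trend_data has two columns: [1, 1, ..., 1] and [1, 2, ..., nobs].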
return endog, exog
def initialize(self):
"""
Initialize the SARIMAX model.
Notes
-----
These initialization steps must occur following the parent class
__init__ function calls.
"""
super(SARIMAX, self).initialize()
# Cache the indexes of included polynomial orders (for update below)
# (but we do not want the index of the constant term, so exclude the
# first index)
self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
self._polynomial_seasonal_ar_idx = np.nonzero(
self.polynomial_seasonal_ar
)[0][1:]
self._polynomial_seasonal_ma_idx = np.nonzero(
self.polynomial_seasonal_ma
)[0][1:]
# Save the indices corresponding to the reduced form lag polynomial
# parameters in the transition and selection matrices so that they
# don't have to be recalculated for each update()
start_row = self._k_states_diff
end_row = start_row + self.k_ar + self.k_seasonal_ar
col = self._k_states_diff
if not self.hamilton_representation:
self.transition_ar_params_idx = (
np.s_['transition', start_row:end_row, col]
)
else:
self.transition_ar_params_idx = (
np.s_['transition', col, start_row:end_row]
)
start_row += 1
end_row = start_row + self.k_ma + self.k_seasonal_ma
col = 0
if not self.hamilton_representation:
self.selection_ma_params_idx = (
np.s_['selection', start_row:end_row, col]
)
else:
self.design_ma_params_idx = (
np.s_['design', col, start_row:end_row]
)
# Cache indices for exog variances in the state covariance matrix
if self.state_regression and self.time_varying_regression:
idx = np.diag_indices(self.k_posdef)
self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
idx[1][-self.k_exog:])
def initialize_default(self, approximate_diffuse_variance=None):
if approximate_diffuse_variance is None:
approximate_diffuse_variance = self.ssm.initial_variance
init = Initialization(
self.k_states,
approximate_diffuse_variance=approximate_diffuse_variance)
if self.enforce_stationarity:
# Differencing operators are at the beginning
init.set((0, self._k_states_diff), 'approximate_diffuse')
# Stationary component in the middle
init.set((self._k_states_diff, self._k_states_diff + self._k_order),
'stationary')
# Regression components at the end
init.set((self._k_states_diff + self._k_order,
self._k_states_diff + self._k_order + self.k_exog),
'approximate_diffuse')
# If we're not enforcing stationarity, then we can't initialize a
# stationary component
else:
init.set(None, 'approximate_diffuse')
self.ssm.initialization = init
@property
def initial_design(self):
"""Initial design matrix"""
# Basic design matrix
design = np.r_[
[1] * self._k_diff,
([0] * (self.seasonal_periods - 1) + [1]) * self._k_seasonal_diff,
[1] * self.state_error, [0] * (self._k_order - 1)
]
if len(design) == 0:
design = np.r_[0]
# If we have exogenous regressors included as part of the state vector
# then the exogenous data is incorporated as a time-varying component
# of the design matrix
if self.state_regression:
if self._k_order > 0:
design = np.c_[
np.reshape(
np.repeat(design, self.nobs),
(design.shape[0], self.nobs)
).T,
self.exog
].T[None, :, :]
else:
design = self.exog.T[None, :, :]
return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
@property
def initial_transition(self):
"""Initial transition matrix"""
transition = np.zeros((self.k_states, self.k_states))
# Exogenous regressors component
if self.state_regression:
start = -self.k_exog
# T_\beta
transition[start:, start:] = np.eye(self.k_exog)
# Autoregressive component
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
# Autoregressive component
start = -self._k_order
end = None
# T_c
if self._k_order > 0:
transition[start:end, start:end] = companion_matrix(self._k_order)
if self.hamilton_representation:
transition[start:end, start:end] = np.transpose(
companion_matrix(self._k_order)
)
# Seasonal differencing component
# T^*
if self._k_seasonal_diff > 0:
seasonal_companion = companion_matrix(self.seasonal_periods).T
seasonal_companion[0, -1] = 1
for d in range(self._k_seasonal_diff):
start = self._k_diff + d * self.seasonal_periods
end = self._k_diff + (d + 1) * self.seasonal_periods
# T_c^*
transition[start:end, start:end] = seasonal_companion
# i
for i in range(d + 1, self._k_seasonal_diff):
transition[start, end + self.seasonal_periods - 1] = 1
# \iota
transition[start, self._k_states_diff] = 1
# Differencing component
if self._k_diff > 0:
idx = np.triu_indices(self._k_diff)
# T^**
transition[idx] = 1
# [0 1]
if self.seasonal_periods > 0:
start = self._k_diff
end = self._k_states_diff
transition[:self._k_diff, start:end] = (
([0] * (self.seasonal_periods - 1) + [1]) *
self._k_seasonal_diff)
# [1 0]
column = self._k_states_diff
transition[:self._k_diff, column] = 1
return transition
@property
def initial_selection(self):
"""Initial selection matrix"""
if not (self.state_regression and self.time_varying_regression):
if self.k_posdef > 0:
selection = np.r_[
[0] * (self._k_states_diff),
[1] * (self._k_order > 0), [0] * (self._k_order - 1),
[0] * ((1 - self.mle_regression) * self.k_exog)
][:, None]
if len(selection) == 0:
selection = np.zeros((self.k_states, self.k_posdef))
else:
selection = np.zeros((self.k_states, 0))
else:
selection = np.zeros((self.k_states, self.k_posdef))
# Typical state variance
if self._k_order > 0:
selection[0, 0] = 1
# Time-varying regression coefficient variances
for i in range(self.k_exog, 0, -1):
selection[-i, -i] = 1
return selection
@property
def _res_classes(self):
return {'fit': (SARIMAXResults, SARIMAXResultsWrapper)}
@staticmethod
def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
polynomial_ma, k_trend=0, trend_data=None):
k = 2 * k_ma
r = max(k + k_ma, k_ar)
k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1
residuals = None
if k_ar + k_ma + k_trend > 0:
# If we have MA terms, get residuals from an AR(k) model to use
# as data for conditional sum of squares estimates of the MA
# parameters
if k_ma > 0:
Y = endog[k:]
X = lagmat(endog, k, trim='both')
params_ar = np.linalg.pinv(X).dot(Y)
import copy
import numpy as np
from scipy.spatial import ConvexHull
import matplotlib.pyplot as plt
from scipy.optimize import fmin
from scipy.interpolate import PchipInterpolator
from .ParticleGroupExtension import core_emit_calc
from .nicer_units import *
from .tools import scale_and_get_units
def emittance_vs_fraction(pg, var, number_of_points=25, plotting=True, verbose=False, show_core_emit_plot=False, title_fraction=[], title_emittance=[]):
# pg: input ParticleGroup (deep-copied internally, so the caller's object is not modified)
# var: phase-space variable to analyze, 'x' or 'y'
pg = copy.deepcopy(pg)
var1 = var
var2 = 'p' + var
# Check input and perform initializations:
x = getattr(pg, var1)
y = getattr(pg, var2)/pg.mass
w = pg.weight
(full_emittance, alpha, beta, center_x, center_y) = get_twiss(x, y, w)
fs = np.linspace(0,1,number_of_points)
es = np.zeros(number_of_points)
es[-1] = full_emittance
twiss_parameters = np.array([alpha, beta, center_x, center_y])
twiss_scales = np.abs(np.array([alpha, beta, np.max([1.0e-6, np.abs(center_x)]), np.max([1.0e-6, np.abs(center_y)])])) # scale of each fit parameter, helps simplex dimensions all be similar
normed_twiss_parameters = twiss_parameters/twiss_scales
aa = np.empty(len(fs))
bb = np.empty(len(fs))
cx = np.empty(len(fs))
cp = np.empty(len(fs))
aa[:] = np.nan
bb[:] = np.nan
cx[:] = np.nan
cp[:] = np.nan
# Computation of emittance vs. fractions
# Run through bounding ellipse areas (largest to smallest) and compute the
# enclosed fraction and emittance of the enclosed beam. The Twiss parameters
# computed for the minimum bounding ellipse of the entire distribution are
# used as an initial guess:
if verbose:
print('')
print(' computing emittance vs. fraction curve...')
indices = np.arange(len(es)-2,1,-1)
for ind, ii in enumerate(indices):
# use previous ellipse as a guess point to compute next one:
twiss_parameter_guess = normed_twiss_parameters
normed_twiss_parameters = fmin(lambda xx: get_emit_at_frac(fs[ii],xx*twiss_scales,x,y,w), twiss_parameter_guess, args=(), maxiter=None, disp=verbose) # xtol=0.01, ftol=1,
es[ii] = get_emit_at_frac(fs[ii],normed_twiss_parameters*twiss_scales,x,y,w)
aa[ii] = normed_twiss_parameters[0]*twiss_scales[0]
bb[ii] = normed_twiss_parameters[1]*twiss_scales[1]
cx[ii] = normed_twiss_parameters[2]*twiss_scales[2]
cp[ii] = normed_twiss_parameters[3]*twiss_scales[3]
if verbose:
print(' ...done.')
# Compute core fraction and emittance:
if verbose:
print('')
print(' computing core emittance and fraction: ')
ec = core_emit_calc(x, y, w, show_fit=show_core_emit_plot)
if verbose:
print('done.')
fc = np.interp(ec,es,fs)
ac = np.interp(fc,fs,aa)
bc = np.interp(fc,fs,bb)
gc = (1.0+ac**2)/bc
# Plot results
if plotting:
if verbose:
print(' plotting data: ')
plot_points=100
base_units = 'm'
(es_plot, emit_units, emit_scale) = scale_and_get_units(es, base_units)
ec_plot = ec/emit_scale
fc1s = np.ones(plot_points)*fc
ec1s = np.linspace(0.0,1.0,plot_points)*ec_plot
ec2s = np.ones(plot_points)*ec_plot
fc2s = np.linspace(0.0,1.0,plot_points)
plt.figure(dpi=100)
plt.plot(fc1s, ec1s, 'r--')
plt.plot(fc2s, ec2s, 'r--')
plt.plot(fs, ec_plot*fs, 'r')
plt.plot(fs, es_plot, 'b.')
pchip = PchipInterpolator(fs, es_plot)
plt.plot(fc2s, pchip(fc2s), 'b-')
plt.xlim([0,1])
plt.ylim(bottom=0)
plt.xlabel('Fraction')
plt.ylabel(f'Emittance ({emit_units})')
title_str = rf'$\epsilon_{{core}}$ = {ec_plot:.3g} {emit_units}, $f_{{core}}$ = {fc:.3f}'
if title_fraction:
title_str = title_str + rf', $\epsilon_{{{title_fraction}}}$ = {pchip(title_fraction):.3g} {emit_units}'  # np.interp(title_fraction, fs, es)
plt.title(title_str)
if verbose:
print('done.')
return (es, fs, ec, fc)
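# Usage sketch (hedged: assumes a ParticleGroup `pg` with x/px coordinates,
# weights and mass, loaded elsewhere):
#   es, fs, ec, fc = emittance_vs_fraction(pg, 'x', number_of_points=25,
#                                          plotting=False)
#   # es[i] is the emittance of the best-fit ellipse enclosing fraction fs[i];
#   # ec and fc are the core emittance and core fraction.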
def get_twiss(x, y, w):
w_sum = np.sum(w)
x0=np.sum(x*w)/w_sum
y0=np.sum(y*w)/w_sum
dx=x-x0
dy=y-y0
x2 = np.sum(dx**2*w)/w_sum
y2 = np.sum(dy**2*w)/w_sum
xy = np.sum(dx*dy*w)/w_sum
e=np.sqrt(x2*y2-xy**2)
a = -xy/e
b = x2/e
return (e,a,b,x0,y0)
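# Quick check (hedged sketch): for an uncorrelated Gaussian beam, get_twiss
# should return e ~ sigma_x * sigma_y, alpha ~ 0 and beta ~ sigma_x / sigma_y:
#   rng = np.random.default_rng(0)
#   x = rng.normal(0.0, 1e-3, 100_000)
#   y = rng.normal(0.0, 1e-4, 100_000)
#   e, a, b, x0, y0 = get_twiss(x, y, np.ones_like(x))  # e ~ 1e-7, b ~ 10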
def get_emit_at_frac(f_target, twiss_parameters, x, y, w):
alpha = twiss_parameters[0]
beta = twiss_parameters[1]
x0 = twiss_parameters[2]
y0 = twiss_parameters[3]
# subtract out centroids:
dx=x-x0
dy=y-y0
# compute and compare single particle emittances to emittance from Twiss parameters
gamma=(1.0+alpha**2)/beta
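# half of the Courant-Snyder invariant gamma*dx**2 + 2*alpha*dx*dy + beta*dy**2
# per particle; sorting it orders particles by the smallest ellipse (with these
# Twiss parameters) that encloses them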
e_particles = 0.5*(gamma*dx**2 + beta*dy**2 + 2.0*alpha*dx*dy)
e_particles = np.sort(e_particles)
idx_target = int(np.floor(f_target * len(e_particles)))
frac_emit = np.sum(e_particles[0:idx_target])/(idx_target+1.0)
return frac_emit
# This function is no longer used, alas
def minboundellipse( x_all, y_all, tolerance=1.0e-3, plot_on=False):
# x_all and y_all are rows of points
# reduce set of points to just the convex hull of the input
ch = ConvexHull(np.array([x_all,y_all]).transpose())
x = x_all[ch.vertices]
y = y_all[ch.vertices]
d = 2
N = len(x)
P = np.array([x, y])
Q = np.array([x, y, np.ones(N)])
# Initialize
count = 1
err = 1
u = (1.0/N) * np.array([np.ones(N)]).transpose()
# Khachiyan Algorithm
while (err > tolerance):
X = Q @ np.diag(u.reshape(len(u))) @ Q.transpose()
M = np.diag(Q.transpose() @ np.linalg.solve(X, Q))
j = np.argmax(M)
maximum = M[j]
step_size = (maximum-d-1.0)/((d+1.0)*(maximum-1.0))
new_u = (1.0 - step_size)*u
new_u[j] = new_u[j] + step_size
err = np.linalg.norm(new_u - u)
count = count + 1
u = new_u
U = np.diag(u.reshape(len(u)))
# Compute the twiss parameters
A = (1.0/d) * np.linalg.inv(P @ U @ P.transpose() - (P @ u) @ (P @ u).transpose() )
(U, D, V) = np.linalg.svd(A)
a = 1/np.sqrt(D[0]) # major axis
b = 1/np.sqrt(D[1])  # minor axis
from astropy.io import fits
import os
import numpy as np
import numpy
import DRE
from DRE.misc.h5py_compression import compression_types
from DRE.misc.interpolation import fit_parabola_1d
from DRE.misc.read_psf import get_psf
from scipy.signal import fftconvolve
class ModelsCube:
"""
This object stores the models and performs the convolution and fit operations; it also computes
the parameters of the best-fit model and builds a mosaic image to visualize the result
Attributes
----------
models : ndarray
numpy/cupy array with the cube of models, the axes are sorted as (ax_ratio, angle, log_r, x_image, y_image)
convolved_models : ndarray
numpy/cupy array with the cube of models convolved with a PSF
header : dict
astropy header of the models fits file
original_shape : tuple
shape of the models as saved in the fits file (ax_ratio, angle x x_image, log_r x y_image)
src_index : ndarray
numpy array with the Sérsic index axis
log_r : ndarray
numpy array with the log_r axis
angle : ndarray
numpy array with the angle axis
ax_ratio : ndarray
numpy array with the ax_ratio axis
compression : dict
dictionary with arguments for H5Py compression
Methods
-------
load_models(models_file):
loads the models fits file and reshapes it
save_models(output_file):
saves the convolved models into a fits file
convolve(psf, *args, **kwargs):
convolves the models with the psf; it is implemented in the child classes ModelsCPU and ModelsGPU depending on the
acceleration method
dre_fit(data, segment, noise, backend=numpy)
performs the fit and returns a numpy/cupy array with the chi-squared residual between each model and the
data inside the segment
pond_rad_3d(chi_cube)
computes a weighted radius whose weights are 1/chi_squared for each model, and a weighted variance
with respect to the minimum and to the weighted radius; both are returned in log10 scale
get_parameters(chi_cube)
find the model that minimizes the chi_square and computes the parameters for this model
make_mosaic(data, segment, model_index)
makes an image with the data, the segment and the model, all scaled to the data flux
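Examples
--------
A minimal usage sketch (hedged: the file name is hypothetical and psf,
data_cut, segment_cut and noise_cut must be prepared elsewhere):
>>> models = ModelsCube('modelbulge.fits')
>>> models.convolve(psf)
>>> chi_cube = models.dre_fit(data_cut, segment_cut, noise_cut)
>>> chi_cube.shape  # (src_index, ax_ratio, angle, log_r)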
"""
def __init__(self, models_file=None, out_compression='none'):
"""
Parameters
----------
models_file : str
the path to the fits file with the models
out_compression : str
compression level for the HDF5 output file, can be 'none', 'low', 'medium' or 'high'
"""
self.models = None
self.convolved_models = None
self.header = None
self.original_shape = None
self.src_index = None
self.log_r = None
self.angle = None
self.ax_ratio = None
self.x_image = None
self.y_image = None
self.compression = compression_types[out_compression]
if models_file is None:
dre_dir = os.path.dirname(os.path.realpath(DRE.__file__))
models_file = os.path.join(dre_dir, 'models', 'modelbulge.fits')
self.load_models(models_file)
def __getitem__(self, index):
return self.models.__getitem__(index)
@property
def shape(self):
"""
Returns
-------
tuple
tuple with the shape of the models array
"""
return self.models.shape
@property
def axes_names(self):
"""
Returns
-------
tuple
name of each axis
"""
return 'src_index', 'ax_ratio', 'angle', 'log_r', 'x_image', 'y_image'
@property
def axes(self):
"""
Returns
-------
tuple
arrays of each axis
"""
return self.src_index, self.ax_ratio, self.angle, self.log_r, self.x_image, self.y_image
def load_models(self, models_file):
"""
loads the models fits file and reshapes it, also loads the header and computes the axes
Parameters
----------
models_file : str
the path to the fits file with the models
"""
self.header = fits.getheader(models_file)
cube = fits.getdata(models_file).astype('float')
self.original_shape = cube.shape
if "NINDEX" in self.header:
cube = cube.reshape(self.header["NINDEX"], self.header["NAXRAT"], self.header["NPOSANG"],
self.header["BOXSIZE"], self.header["NLOGH"], self.header["BOXSIZE"])
else:
# the old cube without Sérsic index
cube = cube.reshape(1, self.header["NAXRAT"], self.header["NPOSANG"],
self.header["BOXSIZE"], self.header["NLOGH"], self.header["BOXSIZE"])
self.header["INDEX0"] = 4.0
self.header["NINDEX"] = 1
self.header["DINDEX"] = 0
# swap log_r and x_image
cube = cube.swapaxes(-2, -3)
self.models = cube
self.src_index = np.arange(self.header["NINDEX"]) * self.header["DINDEX"] + self.header["INDEX0"]
self.log_r = np.arange(self.header["NLOGH"]) * self.header["DLOGH"] + self.header["LOGH0"]
self.angle = np.arange(self.header["NPOSANG"]) * self.header["DPOSANG"] + self.header["POSANG0"]
self.ax_ratio = np.arange(self.header["NAXRAT"]) * self.header["DAXRAT"] + self.header["AXRAT0"]
self.x_image = np.arange(self.header["BOXSIZE"])
self.y_image = np.arange(self.header["BOXSIZE"])
def save_models(self, output_file):
"""128
saves the convolved models into a fits file at the specified directory
Parameters
----------
output_file : str
the path to the fits file to save the models
"""
cube = self.convolved_models.swapaxes(-2, -3)
cube = cube.reshape(self.original_shape)
models_hdu = fits.ImageHDU(data=cube)
header_hdu = fits.PrimaryHDU(header=self.header)
hdul = fits.HDUList([header_hdu, models_hdu])
hdul.writeto(output_file, overwrite=True)
@staticmethod
def to_cpu(array):
return array
def convolve(self, psf, *args, **kwargs):
"""
convolves the models with the PSF and stores them in the convolved_models attribute;
it may be reimplemented in the child classes ModelsCPU and ModelsGPU depending on the acceleration method
Parameters
----------
psf : ndarray
array representing the PSF
"""
self.convolved_models = np.zeros(self.models.shape)
for i in range(self.convolved_models.shape[0]):
for j in range(self.convolved_models.shape[1]):
self.convolved_models[i, j] = fftconvolve(self.models[i, j], psf[np.newaxis, np.newaxis],
mode='same', axes=(-2, -1))
def dre_fit(self, data, segment, noise, backend=numpy):
"""
performs the fit with these steps:
- mask the models, the data and the noise with the segment;
all following operations act only on pixels inside the segment
- compute the flux of each model and of the object, and scale each model to match the object flux
- compute the residual between each scaled model and the data, divided by the per-pixel variance,
where the variance is taken as scaled_model + noise**2 (shot noise of the model plus background variance)
- compute the chi-squared: average the residuals over the segment pixels
Parameters
----------
backend : module, optional
module to use as backend, should be numpy or cupy
data : ndarray
numpy/cupy array corresponding to a science image cut with the object at the center
segment : ndarray
numpy/cupy array corresponding to a segmentation image cut
noise : ndarray
numpy/cupy array corresponding to a background RMS image cut
Returns
-------
ndarray
numpy/cupy array with the chi-square for each model
"""
# mask all elements, faster with index for large arrays
mask_idx = backend.where(segment)
models = self.convolved_models[..., mask_idx[0], mask_idx[1]]
data = data[mask_idx[0], mask_idx[1]]
noise = noise[mask_idx[0], mask_idx[1]]
flux_models = backend.sum(models, axis=-1)
flux_data = backend.nansum(data, axis=-1)
scale = flux_data / flux_models
models = scale[..., backend.newaxis] * models
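# per-pixel variance: shot noise of the scaled model (~ model counts) plus
# the background variance noise**2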
chi = (data - models) ** 2 / (models + noise ** 2)
return backend.nanmean(chi, axis=-1)
def pond_rad_3d(self, chi_cube, log_r_min):
"""
DEPRECATION WARNING: this method is probably going to be deprecated in the near future
computes a weighted radius whose weights are 1/chi_squared for each model, and a weighted variance
with respect to the minimum and to the weighted radius
Parameters
----------
chi_cube : ndarray
numpy array with the chi_squared resulting from the DRE fit, if using cupy array it must be converted
to numpy array before using this method
log_r_min : float
value of log_r at the model which minimizes the chi_squared
Returns
-------
log_r_chi : float
log10 of the weighted radius
log_r_var : float
log10 of the variance respect to the radius of the optimal model
log_r_chi_var : float
log10 of the variance respect to the weighted radius
"""
r_chi = np.sum((10 ** self.log_r) / chi_cube)
r_chi = r_chi / np.sum(1. / chi_cube)
log_r_chi = np.log10(r_chi)
r_var = np.sum(((10 ** self.log_r - 10 ** log_r_min) ** 2) / chi_cube)
r_var = r_var / np.sum(1. / chi_cube)
log_r_var = np.log10(r_var)
r_chi_var = np.sum(((10 ** self.log_r - r_chi) ** 2) / chi_cube)
r_chi_var = r_chi_var / np.sum(1. / chi_cube)
log_r_chi_var = np.log10(r_chi_var)
return log_r_chi, log_r_var, log_r_chi_var
"""
Event source for MAGIC calibrated data files.
Requires the uproot package (https://github.com/scikit-hep/uproot).
"""
import re
import uproot
import logging
import scipy
import scipy.interpolate
import numpy as np
from decimal import Decimal
from enum import Enum, auto
from astropy.coordinates import Angle
from astropy import units as u
from astropy.time import Time
from ctapipe.io.eventsource import EventSource
from ctapipe.io.datalevels import DataLevel
from ctapipe.core import Container, Field
from ctapipe.core.traits import Bool
from ctapipe.coordinates import CameraFrame
from ctapipe.containers import (
ArrayEventContainer,
SimulatedEventContainer,
SimulatedShowerContainer,
SimulationConfigContainer,
PointingContainer,
TelescopePointingContainer,
TelescopeTriggerContainer,
MonitoringCameraContainer,
PedestalContainer,
)
from ctapipe.instrument import (
TelescopeDescription,
SubarrayDescription,
OpticsDescription,
CameraDescription,
CameraReadout,
)
from .version import __version__
from .constants import (
MC_STEREO_TRIGGER_PATTERN,
PEDESTAL_TRIGGER_PATTERN,
DATA_STEREO_TRIGGER_PATTERN
)
__all__ = ['MAGICEventSource', '__version__']
LOGGER = logging.getLogger(__name__)
degrees_per_hour = 15.0
seconds_per_hour = 3600.
msec2sec = 1e-3
nsec2sec = 1e-9
# MAGIC telescope positions in m wrt. to the center of CTA simulations
# MAGIC_TEL_POSITIONS = {
# 1: [-27.24, -146.66, 50.00] * u.m,
# 2: [-96.44, -96.77, 51.00] * u.m
# }
# MAGIC telescope positions in m wrt. to the center of MAGIC simulations, from
# CORSIKA and reflector input card
MAGIC_TEL_POSITIONS = {
1: [31.80, -28.10, 0.00] * u.m,
2: [-31.80, 28.10, 0.00] * u.m
}
# Magnetic field values at the MAGIC site (taken from CORSIKA input cards)
# Reference system is the CORSIKA one, where x-axis points to magnetic north
# i.e. B y-component is 0
# MAGIC_Bdec is the magnetic declination i.e. angle between magnetic and
# geographic north, negative if pointing westwards, positive if pointing
# eastwards
# MAGIC_Binc is the magnetic field inclination
MAGIC_Bx = u.Quantity(29.5, u.uT)
MAGIC_Bz = u.Quantity(23.0, u.uT)
MAGIC_Btot = np.sqrt(MAGIC_Bx**2+MAGIC_Bz**2)
MAGIC_Bdec = u.Quantity(-7.0, u.deg).to(u.rad)
MAGIC_Binc = u.Quantity(np.arctan2(-MAGIC_Bz.value, MAGIC_Bx.value), u.rad)
# MAGIC telescope description
OPTICS = OpticsDescription.from_name('MAGIC')
MAGICCAM = CameraDescription.from_name("MAGICCam")
pulse_shape_lo_gain = np.array([0., 1., 2., 1., 0.])
pulse_shape_hi_gain = np.array([1., 2., 3., 2., 1.])
pulse_shape = np.vstack((pulse_shape_hi_gain, pulse_shape_lo_gain))
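# reference_pulse_shape is stacked as (n_channels, n_samples); by ctapipe
# convention channel 0 is the high-gain channel (both shapes above appear
# to be coarse placeholders rather than measured pulse templates)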
MAGICCAM.readout = CameraReadout(
camera_name='MAGICCam',
sampling_rate=u.Quantity(1.64, u.GHz),
reference_pulse_shape=pulse_shape,
reference_pulse_sample_width=u.Quantity(0.5, u.ns)
)
MAGICCAM.geometry.frame = CameraFrame(focal_length=OPTICS.equivalent_focal_length)
GEOM = MAGICCAM.geometry
MAGIC_TEL_DESCRIPTION = TelescopeDescription(
name='MAGIC', tel_type='MAGIC', optics=OPTICS, camera=MAGICCAM)
MAGIC_TEL_DESCRIPTIONS = {1: MAGIC_TEL_DESCRIPTION, 2: MAGIC_TEL_DESCRIPTION}
class MARSDataLevel(Enum):
"""
Enum of the different MARS Data Levels
"""
CALIBRATED = auto() # Calibrated images in charge and time (no waveforms)
STAR = auto() # Cleaned images, with Hillas parametrization
SUPERSTAR = auto() # Stereo parameters reconstructed
MELIBEA = auto() # Reconstruction of hadronness, event direction and energy
class MissingDriveReportError(Exception):
"""
Exception raised when a subrun does not have drive reports.
"""
def __init__(self, message):
self.message = message
class MAGICEventSource(EventSource):
"""
EventSource for MAGIC calibrated data.
This class operates with the MAGIC data subrun-wise for calibrated data.
Attributes
----------
current_run : MarsCalibratedRun
Object containing the info needed to fill the ctapipe Containers
datalevel : DataLevel
Data level according to the definition in ctapipe
file_ : uproot.ReadOnlyFile
A ROOT file opened with uproot
is_mc : bool
Flag indicating real or simulated data
mars_datalevel : int
Data level according to MARS convention
metadata : dict
Dictionary containing metadata
run_numbers : int
Run number of the file
simulation_config : SimulationConfigContainer
Container filled with the information about the simulation
telescope : int
The number of the telescope
use_pedestals : bool
Flag indicating if pedestal events should be returned by the generator
"""
use_pedestals = Bool(
default_value=False,
help=(
'If true, extract pedestal events instead of cosmic events.'
),
).tag(config=True)
def __init__(self, input_url=None, config=None, parent=None, **kwargs):
"""
Constructor
Parameters
----------
config: traitlets.loader.Config
Configuration specified by config file or cmdline arguments.
Used to set traitlet values.
Set to None if no configuration to pass.
parent : ctapipe.core.Tool
Tool executable that is calling this component.
Passes the correct logger to the component.
Set to None if no Tool to pass.
kwargs: dict
Additional parameters to be passed.
NOTE: The file mask of the data to read can be passed with
the 'input_url' parameter.
"""
super().__init__(input_url=input_url, config=config, parent=parent, **kwargs)
# Retrieving the list of run numbers corresponding to the data files
self.file_ = uproot.open(self.input_url.expanduser())
run_info = self.parse_run_info()
self.run_numbers = run_info[0]
self.is_mc = run_info[1]
self.telescope = run_info[2]
self.mars_datalevel = run_info[3]
self.metadata = self.parse_metadata_info()
# Retrieving the data level (so far HARDCODED Sorcerer)
self.datalevel = DataLevel.DL0
if self.is_mc:
self.simulation_config = self.parse_simulation_header()
if not self.is_mc:
self.is_stereo, self.is_sumt = self.parse_data_info()
# # Setting up the current run with the first run present in the data
# self.current_run = self._set_active_run(run_number=0)
self.current_run = None
self._subarray_info = SubarrayDescription(
name='MAGIC',
tel_positions=MAGIC_TEL_POSITIONS,
tel_descriptions=MAGIC_TEL_DESCRIPTIONS
)
if self.allowed_tels:
self._subarray_info = self._subarray_info.select_subarray(self.allowed_tels)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Releases resources (e.g. open files).
Parameters
----------
exc_type : Exception
Class of the exception
exc_val : BaseException
Type of the exception
exc_tb : TracebackType
The traceback
"""
self.close()
def close(self):
"""
Closes open ROOT file.
"""
self.file_.close()
@staticmethod
def is_compatible(file_path):
"""
This method checks if the specified file is a MAGIC data file. The
result will be True only if the file is in ROOT format and contains
the 'Events', 'RunHeaders' and 'RunTails' trees.
Parameters
----------
file_path: str
Path to file
Returns
-------
bool:
True if the file is a MAGIC data run, False otherwise.
"""
is_magic_root_file = True
try:
with uproot.open(file_path) as input_data:
mandatory_trees = ['Events', 'RunHeaders', 'RunTails']
trees_in_file = [tree in input_data for tree in mandatory_trees]
if not all(trees_in_file):
is_magic_root_file = False
except ValueError:
# uproot raises ValueError if the file is not a ROOT file
is_magic_root_file = False
return is_magic_root_file
@staticmethod
def get_run_info_from_name(file_name):
"""
This internal method extracts the run number and
type (data/MC) from the specified file name.
Parameters
----------
file_name : str
A file name to process.
Returns
-------
run_number: int
The run number of the file.
is_mc: Bool
Flag to tag MC files
telescope: int
Number of the telescope
datalevel: MARSDataLevel
Data level according to MARS
Raises
------
IndexError
If the file name does not match any known MAGIC data or MC naming pattern.
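Examples
--------
A hedged sketch with a made-up (but pattern-conforming) file name:
>>> MAGICEventSource.get_run_info_from_name(
...     '050919_M1_05057144.001_Y_CrabNebula-W0.40+035.root')
(5057144, False, 1, <MARSDataLevel.CALIBRATED: 1>)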
"""
mask_data_calibrated = r"\d{6}_M(\d+)_(\d+)\.\d+_Y_.*"
mask_data_star = r"\d{6}_M(\d+)_(\d+)\.\d+_I_.*"
mask_data_superstar = r"\d{6}_(\d+)_S_.*"
mask_data_melibea = r"\d{6}_(\d+)_Q_.*"
mask_mc_calibrated = r"GA_M(\d)_za\d+to\d+_\d_(\d+)_Y_.*"
mask_mc_star = r"GA_M(\d)_za\d+to\d+_\d_(\d+)_I_.*"
mask_mc_superstar = r"GA_za\d+to\d+_\d_S_.*"
mask_mc_melibea = r"GA_za\d+to\d+_\d_Q_.*"
if re.findall(mask_data_calibrated, file_name):
parsed_info = re.findall(mask_data_calibrated, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.CALIBRATED
is_mc = False
elif re.findall(mask_data_star, file_name):
parsed_info = re.findall(mask_data_star, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.STAR
is_mc = False
elif re.findall(mask_data_superstar, file_name):
parsed_info = re.findall(mask_data_superstar, file_name)
telescope = None
run_number = int(parsed_info[0])
datalevel = MARSDataLevel.SUPERSTAR
is_mc = False
elif re.findall(mask_data_melibea, file_name):
parsed_info = re.findall(mask_data_melibea, file_name)
telescope = None
run_number = int(parsed_info[0])
datalevel = MARSDataLevel.MELIBEA
is_mc = False
elif re.findall(mask_mc_calibrated, file_name):
parsed_info = re.findall(mask_mc_calibrated, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.CALIBRATED
is_mc = True
elif re.findall(mask_mc_star, file_name):
parsed_info = re.findall(mask_mc_star, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.STAR
is_mc = True
elif re.findall(mask_mc_superstar, file_name):
parsed_info = re.findall(mask_mc_superstar, file_name)
telescope = None
run_number = None
datalevel = MARSDataLevel.SUPERSTAR
is_mc = True
elif re.findall(mask_mc_melibea, file_name):
parsed_info = re.findall(mask_mc_melibea, file_name)
telescope = None
run_number = None
datalevel = MARSDataLevel.MELIBEA
is_mc = True
else:
raise IndexError(
'Can not identify the run number and type (data/MC) of the file'
'{:s}'.format(file_name))
return run_number, is_mc, telescope, datalevel
def parse_run_info(self):
"""
Parses run info from the TTrees in the ROOT file
Returns
-------
run_number: int
The run number of the file
is_mc: Bool
Flag to tag MC files
telescope_number: int
Number of the telescope
datalevel: MARSDataLevel
Data level according to MARS
"""
runinfo_array_list = [
'MRawRunHeader.fRunNumber',
'MRawRunHeader.fRunType',
'MRawRunHeader.fTelescopeNumber',
]
run_info = self.file_['RunHeaders'].arrays(
runinfo_array_list, library="np")
run_number = int(run_info['MRawRunHeader.fRunNumber'][0])
run_type = int(run_info['MRawRunHeader.fRunType'][0])
telescope_number = int(run_info['MRawRunHeader.fTelescopeNumber'][0])
# a note about run numbers:
# mono data has run numbers starting with 1 or 2 (telescope dependent)
# stereo data has run numbers starting with 5
# if both telescopes are taking data with no L3,
# also in this case run number starts with 5 (e.g. muon runs)
# Here the data types (from MRawRunHeader.h)
# std data = 0
# pedestal = 1 (_P_)
# calibration = 2 (_C_)
# domino calibration = 3 (_L_)
# linearity calibration = 4 (_N_)
# point run = 7
# monteCarlo = 256
# none = 65535
mc_data_type = 256
if run_type == mc_data_type:
is_mc = True
else:
is_mc = False
events_tree = self.file_['Events']
melibea_trees = ['MHadronness', 'MStereoParDisp', 'MEnergyEst']
superstar_trees = ['MHillas_1', 'MHillas_2', 'MStereoPar']
star_trees = ['MHillas']
datalevel = MARSDataLevel.CALIBRATED
events_keys = events_tree.keys()
trees_in_file = [tree in events_keys for tree in melibea_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.MELIBEA
trees_in_file = [tree in events_keys for tree in superstar_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.SUPERSTAR
trees_in_file = [tree in events_keys for tree in star_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.STAR
return run_number, is_mc, telescope_number, datalevel
def parse_data_info(self):
"""
Check if data is stereo/mono and std trigger/SUMT
Returns
-------
is_stereo: Bool
True if stereo data, False if mono
is_sumt: Bool
True if SUMT data, False if std trigger
"""
prescaler_mono_nosumt = [1, 1, 0, 1, 0, 0, 0, 0]
prescaler_mono_sumt = [0, 1, 0, 1, 0, 1, 0, 0]
prescaler_stereo = [0, 1, 0, 1, 0, 0, 0, 1]
# L1_table_mono = "L1_4NN"
# L1_table_stereo = "L1_3NN"
L3_table_nosumt = "L3T_L1L1_100_SYNC"
L3_table_sumt = "L3T_SUMSUM_100_SYNC"
trigger_tree = self.file_["Trigger"]
L3T_tree = self.file_["L3T"]
# here we take the 2nd element (if possible) because sometimes
# the first trigger report still has the old prescaler values from a previous run
try:
prescaler_array = trigger_tree["MTriggerPrescFact.fPrescFact"].array(library="np")
except AssertionError:
LOGGER.warning("No prescaler info found. Will assume standard stereo data.")
is_stereo = True
is_sumt = False
return is_stereo, is_sumt
prescaler_size = prescaler_array.size
if prescaler_size > 1:
prescaler = prescaler_array[1]
else:
prescaler = prescaler_array[0]
if prescaler == prescaler_mono_nosumt or prescaler == prescaler_mono_sumt:
is_stereo = False
elif prescaler == prescaler_stereo:
is_stereo = True
else:
is_stereo = True
is_sumt = False
if is_stereo:
# here we take the 2nd element for the same reason as above
# L3Table is empty for mono data i.e. taken with one telescope only
# if both telescopes take data with no L3, L3Table is filled anyway
L3Table_array = L3T_tree["MReportL3T.fTablename"].array(library="np")
L3Table_size = L3Table_array.size
if L3Table_size > 1:
L3Table = L3Table_array[1]
else:
L3Table = L3Table_array[0]
if L3Table == L3_table_sumt:
is_sumt = True
elif L3Table == L3_table_nosumt:
is_sumt = False
else:
is_sumt = False
else:
if prescaler == prescaler_mono_sumt:
is_sumt = True
return is_stereo, is_sumt
@staticmethod
def decode_version_number(version_encoded):
"""
Decodes the version number from an integer
Parameters
----------
version_encoded : int
Version number encoded as integer
Returns
-------
version_decoded: str
Version decoded as major.minor.patch
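Examples
--------
>>> MAGICEventSource.decode_version_number((2 << 16) + (19 << 8) + 3)
'2.19.3'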
"""
major_version = version_encoded >> 16
minor_version = (version_encoded % 65536) >> 8
patch_version = (version_encoded % 65536) % 256
version_decoded = f'{major_version}.{minor_version}.{patch_version}'
return version_decoded
def parse_metadata_info(self):
"""
Parse metadata information from ROOT file
Returns
-------
metadata: dict
Dictionary containing the metadata information:
- run number
- real or simulated data
- telescope number
- subrun number
- source RA and DEC
- source name (real data only)
- observation mode (real data only)
- MARS version
- ROOT version
"""
metadatainfo_array_list_runheaders = [
'MRawRunHeader.fSubRunIndex',
'MRawRunHeader.fSourceRA',
'MRawRunHeader.fSourceDEC',
'MRawRunHeader.fSourceName[80]',
'MRawRunHeader.fObservationMode[60]',
]
metadatainfo_array_list_runtails = [
'MMarsVersion_sorcerer.fMARSVersionCode',
'MMarsVersion_sorcerer.fROOTVersionCode',
]
metadata = dict()
metadata['run_number'] = self.run_numbers
metadata['is_simulation'] = self.is_mc
metadata['telescope'] = self.telescope
meta_info_runh = self.file_['RunHeaders'].arrays(
metadatainfo_array_list_runheaders, library="np"
)
metadata['subrun_number'] = int(meta_info_runh['MRawRunHeader.fSubRunIndex'][0])
metadata['source_ra'] = meta_info_runh['MRawRunHeader.fSourceRA'][0] / \
seconds_per_hour * degrees_per_hour * u.deg
metadata['source_dec'] = meta_info_runh['MRawRunHeader.fSourceDEC'][0] / \
seconds_per_hour * u.deg
if not self.is_mc:
src_name_array = meta_info_runh['MRawRunHeader.fSourceName[80]'][0]
metadata['source_name'] = "".join([chr(item) for item in src_name_array if item != 0])
obs_mode_array = meta_info_runh['MRawRunHeader.fObservationMode[60]'][0]
metadata['observation_mode'] = "".join([chr(item) for item in obs_mode_array if item != 0])
meta_info_runt = self.file_['RunTails'].arrays(
metadatainfo_array_list_runtails, library="np"
)
mars_version_encoded = int(meta_info_runt['MMarsVersion_sorcerer.fMARSVersionCode'][0])
root_version_encoded = int(meta_info_runt['MMarsVersion_sorcerer.fROOTVersionCode'][0])
metadata['mars_version_sorcerer'] = self.decode_version_number(mars_version_encoded)
metadata['root_version_sorcerer'] = self.decode_version_number(root_version_encoded)
return metadata
def parse_simulation_header(self):
"""
Parse the simulation information from the RunHeaders tree.
Returns
-------
SimulationConfigContainer
Container filled with simulation information
Notes
-----
Information is extracted from the RunHeaders tree within the ROOT file.
Within it, the MMcCorsikaRunHeader and MMcRunHeader branches are used.
Here below the units of the members extracted, for reference:
* fSlopeSpec: float
* fELowLim, fEUppLim: GeV
* fCorsikaVersion: int
* fHeightLev[10]: centimeter
* fAtmosphericModel: int
* fRandomPointingConeSemiAngle: deg
* fImpactMax: centimeter
* fNumSimulatedShowers: int
* fShowerThetaMax, fShowerThetaMin: deg
* fShowerPhiMax, fShowerPhiMin: deg
* fCWaveUpper, fCWaveLower: nanometer
"""
run_header_tree = self.file_['RunHeaders']
spectral_index = run_header_tree['MMcCorsikaRunHeader.fSlopeSpec'].array(library="np")[0]
e_low = run_header_tree['MMcCorsikaRunHeader.fELowLim'].array(library="np")[0]
e_high = run_header_tree['MMcCorsikaRunHeader.fEUppLim'].array(library="np")[0]
corsika_version = run_header_tree['MMcCorsikaRunHeader.fCorsikaVersion'].array(library="np")[0]
site_height = run_header_tree['MMcCorsikaRunHeader.fHeightLev[10]'].array(library="np")[0][0]
atm_model = run_header_tree['MMcCorsikaRunHeader.fAtmosphericModel'].array(library="np")[0]
if self.mars_datalevel in [MARSDataLevel.CALIBRATED, MARSDataLevel.STAR]:
view_cone = run_header_tree['MMcRunHeader.fRandomPointingConeSemiAngle'].array(library="np")[0]
max_impact = run_header_tree['MMcRunHeader.fImpactMax'].array(library="np")[0]
n_showers = np.sum(run_header_tree['MMcRunHeader.fNumSimulatedShowers'].array(library="np"))
max_zd = run_header_tree['MMcRunHeader.fShowerThetaMax'].array(library="np")[0]
min_zd = run_header_tree['MMcRunHeader.fShowerThetaMin'].array(library="np")[0]
max_az = run_header_tree['MMcRunHeader.fShowerPhiMax'].array(library="np")[0]
min_az = run_header_tree['MMcRunHeader.fShowerPhiMin'].array(library="np")[0]
max_wavelength = run_header_tree['MMcRunHeader.fCWaveUpper'].array(library="np")[0]
min_wavelength = run_header_tree['MMcRunHeader.fCWaveLower'].array(library="np")[0]
elif self.mars_datalevel in [MARSDataLevel.SUPERSTAR, MARSDataLevel.MELIBEA]:
view_cone = run_header_tree['MMcRunHeader_1.fRandomPointingConeSemiAngle'].array(library="np")[0]
max_impact = run_header_tree['MMcRunHeader_1.fImpactMax'].array(library="np")[0]
n_showers = np.sum(run_header_tree['MMcRunHeader_1.fNumSimulatedShowers'].array(library="np"))
max_zd = run_header_tree['MMcRunHeader_1.fShowerThetaMax'].array(library="np")[0]
min_zd = run_header_tree['MMcRunHeader_1.fShowerThetaMin'].array(library="np")[0]
max_az = run_header_tree['MMcRunHeader_1.fShowerPhiMax'].array(library="np")[0]
min_az = run_header_tree['MMcRunHeader_1.fShowerPhiMin'].array(library="np")[0]
max_wavelength = run_header_tree['MMcRunHeader_1.fCWaveUpper'].array(library="np")[0]
min_wavelength = run_header_tree['MMcRunHeader_1.fCWaveLower'].array(library="np")[0]
return SimulationConfigContainer(
corsika_version=corsika_version,
energy_range_min=u.Quantity(e_low, u.GeV).to(u.TeV),
energy_range_max=u.Quantity(e_high, u.GeV).to(u.TeV),
prod_site_alt=u.Quantity(site_height, u.cm).to(u.m),
spectral_index=spectral_index,
num_showers=n_showers,
shower_reuse=1,
# shower_reuse not written in the magic root file, but since the
# sim_events already include shower reuse we artificially set it
# to 1 (actually every shower reused 5 times for std MAGIC MC)
shower_prog_id=1,
prod_site_B_total=MAGIC_Btot,
prod_site_B_declination=MAGIC_Bdec,
prod_site_B_inclination=MAGIC_Binc,
max_alt=u.Quantity((90. - min_zd), u.deg).to(u.rad),
min_alt=u.Quantity((90. - max_zd), u.deg).to(u.rad),
max_az=u.Quantity(max_az, u.deg).to(u.rad),
min_az=u.Quantity(min_az, u.deg).to(u.rad),
max_viewcone_radius=view_cone * u.deg,
min_viewcone_radius=0.0 * u.deg,
max_scatter_range=u.Quantity(max_impact, u.cm).to(u.m),
min_scatter_range=0.0 * u.m,
atmosphere=atm_model,
corsika_wlen_min=min_wavelength * u.nm,
corsika_wlen_max=max_wavelength * u.nm,
)
def _set_active_run(self, run_number):
"""
This internal method sets the run that will be used for data loading.
Parameters
----------
run_number: int
The run number to use.
Returns
-------
run: dict
Dictionary with the run number, a read-events counter and the
MarsCalibratedRun object holding the data
"""
run = dict()
run['number'] = run_number
run['read_events'] = 0
if self.mars_datalevel == MARSDataLevel.CALIBRATED:
run['data'] = MarsCalibratedRun(self.file_, self.is_mc)
return run
@property
def subarray(self):
return self._subarray_info
@property
def is_simulation(self):
return self.is_mc
@property
def datalevels(self):
return (self.datalevel, )
@property
def obs_ids(self):
# ToCheck: will this be compatible in the future, e.g. with merged MC files
return [self.run_numbers]
def _generator(self):
"""
The default event generator. Returns the mono or pedestal event
generator instance, depending on the `use_pedestals` flag.
Returns
-------
"""
if self.mars_datalevel == MARSDataLevel.CALIBRATED:
if self.use_pedestals:
return self._pedestal_event_generator(telescope=f"M{self.telescope}")
else:
return self._mono_event_generator(telescope=f"M{self.telescope}")
def _stereo_event_generator(self):
"""
Stereo event generator. Yields DataContainer instances, filled
with the read event data.
Returns
-------
"""
counter = 0
# Data container - is initialized once, and data is replaced within it after each yield
data = ArrayEventContainer()
# Telescopes with data:
tels_in_file = ["m1", "m2"]
tels_with_data = [1, 2]
# Loop over the available data runs
for run_number in self.run_numbers:
# Removing the previously read data run from memory
if self.current_run is not None:
if 'data' in self.current_run:
del self.current_run['data']
# Setting the new active run (class MarsRun object)
self.current_run = self._set_active_run(run_number)
# Set monitoring data:
if not self.is_mc:
monitoring_data = self.current_run['data'].monitoring_data
for tel_i, tel_id in enumerate(tels_in_file):
monitoring_camera = MonitoringCameraContainer()
pedestal_info = PedestalContainer()
badpixel_info = PixelStatusContainer()
pedestal_info.sample_time = Time(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalUnix'], format='unix', scale='utc'
)
# hardcoded number of pedestal events averaged over:
pedestal_info.n_events = 500
pedestal_info.charge_mean = []
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Mean'])
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Mean'])
pedestal_info.charge_mean.append(monitoring_data['M{:d}'.format(
tel_i + 1)]['PedestalFromExtractorRndm']['Mean'])
pedestal_info.charge_std = []
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractorRndm']['Rms'])
t_range = Time(monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfoUnixRange'], format='unix', scale='utc')
badpixel_info.hardware_failing_pixels = monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfo']
badpixel_info.sample_time_range = t_range
monitoring_camera.pedestal = pedestal_info
monitoring_camera.pixel_status = badpixel_info
data.mon.tels_with_data = [1, 2]
data.mon.tel[tel_i + 1] = monitoring_camera
else:
assert self.current_run['data'].mcheader_data['M1'] == self.current_run['data'].mcheader_data['M2'], "Simulation configurations are different for M1 and M2 !!!"
data.mcheader.num_showers = self.current_run['data'].mcheader_data['M1']['sim_nevents']
data.mcheader.shower_reuse = self.current_run['data'].mcheader_data['M1']['sim_reuse']
data.mcheader.energy_range_min = (self.current_run['data'].mcheader_data['M1']['sim_emin']).to(u.TeV) # GeV->TeV
data.mcheader.energy_range_max = (self.current_run['data'].mcheader_data['M1']['sim_emax']).to(u.TeV) # GeV->TeV
data.mcheader.spectral_index = self.current_run['data'].mcheader_data['M1']['sim_eslope']
data.mcheader.max_scatter_range = (self.current_run['data'].mcheader_data['M1']['sim_max_impact']).to(u.m) # cm->m
data.mcheader.max_viewcone_radius = (self.current_run['data'].mcheader_data['M1']['sim_conesemiangle']).to(u.deg)# deg->deg
if data.mcheader.max_viewcone_radius != 0.:
data.mcheader.diffuse = True
else:
data.mcheader.diffuse = False
# Loop over the events
for event_i in range(self.current_run['data'].n_stereo_events):
# Event and run ids
event_order_number = self.current_run['data'].stereo_ids[event_i][0]
event_id = self.current_run['data'].event_data['M1']['stereo_event_number'][event_order_number]
obs_id = self.current_run['number']
# Reading event data
event_data = self.current_run['data'].get_stereo_event_data(event_i)
data.meta['origin'] = 'MAGIC'
data.meta['input_url'] = self.input_url
data.meta['max_events'] = self.max_events
# Event counter
data.count = counter
data.index.obs_id = obs_id
data.index.event_id = event_id
# Setting up the R0 container
data.r0.tel.clear()
# Setting up the R1 container
data.r1.tel.clear()
# Setting up the DL0 container
data.dl0.tel.clear()
# Setting up the DL1 container
data.dl1.tel.clear()
pointing = PointingContainer()
# Filling the DL1 container with the event data
for tel_i, tel_id in enumerate(tels_in_file):
# Creating the telescope pointing container
pointing_tel = TelescopePointingContainer()
pointing_tel.azimuth = np.deg2rad(
event_data['{:s}_pointing_az'.format(tel_id)]) * u.rad
pointing_tel.altitude = np.deg2rad(
90 - event_data['{:s}_pointing_zd'.format(tel_id)]) * u.rad
# pointing.ra = np.deg2rad(
# event_data['{:s}_pointing_ra'.format(tel_id)]) * u.rad
# pointing.dec = np.deg2rad(
# event_data['{:s}_pointing_dec'.format(tel_id)]) * u.rad
pointing.tel[tel_i + 1] = pointing_tel
# Adding trigger id (MAGIC nomenclature)
data.r0.tel[tel_i + 1].trigger_type = self.current_run['data'].event_data['M1']['trigger_pattern'][event_order_number]
data.r1.tel[tel_i + 1].trigger_type = self.current_run['data'].event_data['M1']['trigger_pattern'][event_order_number]
data.dl0.tel[tel_i + 1].trigger_type = self.current_run['data'].event_data['M1']['trigger_pattern'][event_order_number]
# Adding event charge and peak positions per pixel
data.dl1.tel[tel_i +
1].image = event_data['{:s}_image'.format(tel_id)]
data.dl1.tel[tel_i +
1].peak_time = event_data['{:s}_pulse_time'.format(tel_id)]
pointing.array_azimuth = np.deg2rad(event_data['m1_pointing_az']) * u.rad
pointing.array_altitude = np.deg2rad(90 - event_data['m1_pointing_zd']) * u.rad
pointing.array_ra = np.deg2rad(event_data['m1_pointing_ra']) * u.rad
pointing.array_dec = np.deg2rad(event_data['m1_pointing_dec']) * u.rad
data.pointing = pointing
if not self.is_mc:
for tel_i, tel_id in enumerate(tels_in_file):
data.trigger.tel[tel_i + 1] = TelescopeTriggerContainer(
time=Time(event_data[f'{tel_id}_unix'], format='unix', scale='utc')
)
else:
data.mc.energy = event_data['true_energy'] * u.GeV
data.mc.alt = (np.pi/2 - event_data['true_zd']) * u.rad
# check meaning of 7deg transformation (I.Vovk)
data.mc.az = -1 * \
(event_data['true_az'] - np.deg2rad(180 - 7)) * u.rad
data.mc.shower_primary_id = 1 - \
event_data['true_shower_primary_id']
data.mc.h_first_int = event_data['true_h_first_int'] * u.cm
# adding a 7deg rotation between the orientation of corsika (x axis = magnetic north) and MARS (x axis = geographical north) frames
# magnetic north is 7 deg westward w.r.t. geographical north
rot_corsika = 7 *u.deg
data.mc.core_x = (event_data['true_core_x']*np.cos(rot_corsika) - event_data['true_core_y']*np.sin(rot_corsika))* u.cm
data.mc.core_y = (event_data['true_core_x']*np.sin(rot_corsika) + event_data['true_core_y']*np.cos(rot_corsika))* u.cm
# Setting the telescopes with data
data.r0.tels_with_data = tels_with_data
data.r1.tels_with_data = tels_with_data
data.dl0.tels_with_data = tels_with_data
data.trigger.tels_with_trigger = tels_with_data
yield data
counter += 1
return
def _mono_event_generator(self, telescope):
"""
Mono event generator. Yields DataContainer instances, filled
with the read event data.
Parameters
----------
telescope: str
The telescope for which to return events. Can be either "M1" or "M2".
Returns
-------
"""
counter = 0
telescope = telescope.upper()
# Data container - is initialized once, and data is replaced after each yield
data = ArrayEventContainer()
# Telescopes with data:
tels_in_file = ["M1", "M2"]
if telescope not in tels_in_file:
raise ValueError(f"Specified telescope {telescope} is not in the allowed list {tels_in_file}")
tel_i = tels_in_file.index(telescope)
tels_with_data = [tel_i + 1, ]
# Removing the previously read data run from memory
if self.current_run is not None:
if 'data' in self.current_run:
del self.current_run['data']
# Setting the new active run
self.current_run = self._set_active_run(self.run_numbers)
# Set monitoring data:
if not self.is_mc:
monitoring_data = self.current_run['data'].monitoring_data
monitoring_camera = MonitoringCameraContainer()
pedestal_info = PedestalContainer()
badpixel_info = PixelStatusContainer()
pedestal_info.sample_time = Time(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalUnix'], format='unix', scale='utc'
)
pedestal_info.n_events = 500 # hardcoded number of pedestal events averaged over
            pedestal_info.charge_mean = []
            pedestal_info.charge_std = []
            for ped_type in ('PedestalFundamental', 'PedestalFromExtractor',
                             'PedestalFromExtractorRndm'):
                pedestal_info.charge_mean.append(
                    monitoring_data['M{:d}'.format(tel_i + 1)][ped_type]['Mean'])
                pedestal_info.charge_std.append(
                    monitoring_data['M{:d}'.format(tel_i + 1)][ped_type]['Rms'])
t_range = Time(monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfoUnixRange'], format='unix', scale='utc')
badpixel_info.hardware_failing_pixels = monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfo']
badpixel_info.sample_time_range = t_range
monitoring_camera.pedestal = pedestal_info
monitoring_camera.pixel_status = badpixel_info
data.mon.tel[tel_i + 1] = monitoring_camera
if telescope == 'M1':
n_events = self.current_run['data'].n_mono_events_m1
else:
n_events = self.current_run['data'].n_mono_events_m2
# Loop over the events
for event_i in range(n_events):
# Event and run ids
event_order_number = self.current_run['data'].mono_ids[telescope][event_i]
event_id = self.current_run['data'].event_data[telescope]['stereo_event_number'][event_order_number]
obs_id = self.current_run['number']
# Reading event data
event_data = self.current_run['data'].get_mono_event_data(event_i, telescope=telescope)
data.meta['origin'] = 'MAGIC'
data.meta['input_url'] = self.input_url
data.meta['max_events'] = self.max_events
data.trigger.event_type = self.current_run['data'].event_data[telescope]['trigger_pattern'][event_order_number]
data.trigger.tels_with_trigger = tels_with_data
if self.allowed_tels:
data.trigger.tels_with_trigger = np.intersect1d(
data.trigger.tels_with_trigger,
self.subarray.tel_ids,
assume_unique=True
)
if not self.is_mc:
data.trigger.tel[tel_i + 1] = TelescopeTriggerContainer(
time=Time(event_data['unix'], format='unix', scale='utc')
)
# Event counter
data.count = counter
data.index.obs_id = obs_id
data.index.event_id = event_id
# Setting up the R0 container
data.r0.tel.clear()
data.r1.tel.clear()
data.dl0.tel.clear()
data.dl1.tel.clear()
data.pointing.tel.clear()
# Creating the telescope pointing container
pointing = PointingContainer()
pointing_tel = TelescopePointingContainer(
azimuth=np.deg2rad(event_data['pointing_az']) * u.rad,
altitude=np.deg2rad(90 - event_data['pointing_zd']) * u.rad,)
pointing.tel[tel_i + 1] = pointing_tel
pointing.array_azimuth = np.deg2rad(event_data['pointing_az']) * u.rad
pointing.array_altitude = np.deg2rad(90 - event_data['pointing_zd']) * u.rad
pointing.array_ra = np.deg2rad(event_data['pointing_ra']) * u.rad
            pointing.array_dec = np.deg2rad(event_data['pointing_dec']) * u.rad
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Pyedra Project (https://github.com/milicolazo/Pyedra/).
# Copyright (c) 2020, <NAME>
# License: MIT
# Full Text: https://github.com/milicolazo/Pyedra/blob/master/LICENSE
# ============================================================================
# DOCS
# ============================================================================
"""Implementation of phase function for asteroids."""
# =============================================================================
# IMPORTS
# =============================================================================
import attr
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
import scipy
import scipy.interpolate
import scipy.optimize as optimization
from . import core, datasets
# ============================================================================
# CLASSES
# ============================================================================
@attr.s(frozen=True)
class HG1G2Plot(core.BasePlot):
"""Plots for HG1G2 fit."""
default_plot_kind = "curvefit"
def curvefit(
self,
df,
idc="id",
alphac="alpha",
magc="v",
ax=None,
cmap=None,
fit_kwargs=None,
data_kwargs=None,
):
"""Plot the phase function using the HG1G2 model.
Parameters
----------
        df: ``pandas.DataFrame``
            The dataframe with the observations to plot.
idc : ``str``, optional (default=id)
Column with the mpc number of the asteroids.
alphac : ``str``, optional (default=alpha)
Column with the phase angle of the asteroids.
magc : ``str``, optional (default=v)
Column with the magnitude. The default 'v' value is reference
to the reduced magnitude in Johnson's V filter.
ax : ``matplotlib.pyplot.Axis``, (optional)
Matplotlib axis
        cmap : ``None``, ``str`` or callable (optional)
            Name of the color map to be used
            (https://matplotlib.org/users/colormaps.html).
            If ``None``, the default colors of the ``matplotlib.pyplot.plot``
            function are used; if a callable, it is used as the colormap
            generator.
        fit_kwargs: ``dict`` or ``None`` (optional)
            The parameters to send to the fit curve plot.
            ``label`` and ``color`` are set internally and can't be provided.
        data_kwargs: ``dict`` or ``None`` (optional)
            The parameters to send to the data plot.
            ``label`` and ``color`` are set internally and can't be provided.
Return
------
``matplotlib.pyplot.Axis`` :
The axis where the method draws.
"""
def fit_y(d, e, f):
y = d - 2.5 * np.log10(e * fi1 + f * fi2 + (1 - e - f) * fi3)
return y
if ax is None:
ax = plt.gca()
fig = ax.get_figure()
fig.set_size_inches(self.DEFAULT_FIGURE_SIZE)
ax.invert_yaxis()
ax.set_title("HG1G2 - Phase curves")
ax.set_xlabel("Phase angle")
ax.set_ylabel(magc.upper())
fit_kwargs = {} if fit_kwargs is None else fit_kwargs
fit_kwargs.setdefault("ls", "--")
fit_kwargs.setdefault("alpha", 0.5)
data_kwargs = {} if data_kwargs is None else data_kwargs
data_kwargs.setdefault("marker", "o")
data_kwargs.setdefault("ls", "None")
model_size = len(self.pdf.model_df)
if cmap is None:
colors = [None] * model_size
elif callable(cmap):
colors = cmap(np.linspace(0, 1, model_size))
else:
cmap = cm.get_cmap(cmap)
colors = cmap(np.linspace(0, 1, model_size))
for idx, m_row in self.pdf.iterrows():
row_id = int(m_row.id)
data = df[df[idc] == m_row.id]
fi1 = np.array([])
fi2 = np.array([])
fi3 = np.array([])
for alpha_b in data[alphac]:
p1 = self.pdf.metadata.y_interp1(alpha_b)
fi1 = np.append(fi1, p1)
p2 = self.pdf.metadata.y_interp2(alpha_b)
fi2 = np.append(fi2, p2)
p3 = self.pdf.metadata.y_interp3(alpha_b)
fi3 = np.append(fi3, p3)
v_fit = fit_y(m_row.H12, m_row.G1, m_row.G2)
line = ax.plot(
data[alphac],
v_fit,
label=f"Fit #{row_id}",
color=colors[idx],
**fit_kwargs,
)
# data part
ax.plot(
data[alphac],
data[magc],
color=line[0].get_color(),
label=f"Data #{row_id}",
**data_kwargs,
)
# reorder legend for two columns
handles, labels = ax.get_legend_handles_labels()
labels, handles = zip(
*sorted(zip(labels, handles), key=lambda t: t[0])
)
ax.legend(handles, labels, ncol=2, loc="best")
return ax
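# A usage sketch for the plot accessor (hedged: it assumes, as in other
# Pyedra plot classes, that the fit result exposes this class through a
# ``.plot`` attribute; that wiring lives in ``core`` and is not shown here):
#
#   catalog = HG1G2_fit(df)          # PyedraFitDataFrame, see below
#   ax = catalog.plot.curvefit(df, cmap='viridis')
#   plt.show()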
# ============================================================================
# FUNCTIONS
# ============================================================================
def _HG1G2_model(X, a, b, c):
x, y, z = X
return a * x + b * y + c * z
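# How ``_HG1G2_model`` is meant to be used (a sketch inferred from ``fit_y``
# in the plot class above, not a verbatim excerpt of the fit below): the
# phase curve V(alpha) = H - 2.5*log10(G1*phi1 + G2*phi2 + (1 - G1 - G2)*phi3)
# becomes linear in flux space, so fitting flux = a*phi1 + b*phi2 + c*phi3
# and back-transforming recovers the physical parameters:
#
#   v = data[magc].to_numpy()
#   flux = 10 ** (-0.4 * v)
#   (a, b, c), cov = optimization.curve_fit(_HG1G2_model, (fi1, fi2, fi3), flux)
#   H = -2.5 * np.log10(a + b + c)
#   G1, G2 = a / (a + b + c), b / (a + b + c)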
def HG1G2_fit(df, idc="id", alphac="alpha", magc="v"):
"""Fit (H-G1-G2) system to data from table.
HG1G2_fit calculates the H,G1 and G2 parameters of the phase
function following the procedure described in [5]_ .
Parameters
----------
    df: ``pandas.DataFrame``
        The dataframe with the observations to fit.
idc : ``str``, optional (default=id)
Column with the mpc number of the asteroids.
alphac : ``str``, optional (default=alpha)
Column with the phase angle of the asteroids.
magc : ``str``, optional (default=v)
Column with the magnitude. The default 'v' value is reference
to the reduced magnitude in Johnson's V filter.
Returns
-------
``PyedraFitDataFrame``
The output contains eight columns: id (mpc number of
the asteroid), H (absolute magnitude returned by the fit),
H error (fit H parameter error), G1 (G1 parameter returned by
        the fit), G1 error (fit G1 parameter error), G2 (G2 parameter
        returned by the fit), G2 error (fit G2 parameter error), and R
        (fit determination coefficient).
References
----------
.. [5] <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>.,<NAME>., <NAME>., 2010,
Icarus, 209, 542.
"""
lt = core.obs_counter(df, 3, idc, alphac)
if len(lt):
lt_str = " - ".join(str(idx) for idx in lt)
raise ValueError(
f"Some asteroids has less than 3 observations: {lt_str}"
)
noob = df.drop_duplicates(subset=idc, keep="first", inplace=False)
size = len(noob)
id_column = np.empty(size, dtype=int)
H_1_2_column = np.empty(size)
error_H_1_2_column = np.empty(size)
G_1_column = np.empty(size)
error_G_1_column = np.empty(size)
G_2_column = np.empty(size)
error_G_2_column = np.empty(size)
R_column = np.empty(size)
observations = np.empty(size, dtype=int)
penttila2016 = datasets.load_penttila2016()
alpha = penttila2016["alpha"].to_numpy()
phi1 = penttila2016["phi1"].to_numpy()
phi2 = penttila2016["phi2"].to_numpy()
phi3 = penttila2016["phi3"].to_numpy()
y_interp1 = scipy.interpolate.interp1d(alpha, phi1)
y_interp2 = scipy.interpolate.interp1d(alpha, phi2)
y_interp3 = scipy.interpolate.interp1d(alpha, phi3)
for idx, id in enumerate(noob[idc]):
data = df[df[idc] == id]
fi1 = np.array([])
fi2 = np.array([])
fi3 = np.array([])
for alpha_b in data[alphac]:
p1 = y_interp1(alpha_b)
fi1 = np.append(fi1, p1)
p2 = y_interp2(alpha_b)
            fi2 = np.append(fi2, p2)
            p3 = y_interp3(alpha_b)
            fi3 = np.append(fi3, p3)
"""
This scripts is similar to dynamic.py. Yet, since we simulate the vortex-
induced load, we need to solve for the new lift variable q, so we consider a
new mixed function space with additional variables.
The additional constants present in this script are:
- Cl0:
mean lift coefficient.
- A, eps:
coupling constants used in the model of Facchinetti et al. (2004)
"""
import time, datetime
import numpy as np
from fenics import *
# =============================================================================
St = 0.16
bend_to_twist = 1.5
# Aerodynamic coefficients
Cd = 1.2
Cl0 = 0.3
# Coupling constants
A, eps = 12., 0.3
def initialise_results():
"""
[t, n, b, w, fext, q, speed]
"""
    return [np.empty(0), np.empty(0), np.empty(0), np.empty(0),
            np.empty(0), np.empty(0), np.empty(0)]
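# For reference, the lift variable q introduced above follows the forced
# van der Pol equation of Facchinetti et al. (2004). A minimal sketch of its
# right-hand side (``omega_f`` is the shedding frequency 2*pi*St*U/D and
# ``ddy`` the cross-flow acceleration -- names assumed here; the actual
# variational forms live further down in this script):
def wake_oscillator_rhs(q, dq, ddy, omega_f, D):
    # q'' = -eps*omega_f*(q**2 - 1)*q' - omega_f**2*q + (A/D)*y''
    return -eps * omega_f * (q**2 - 1.0) * dq - omega_f**2 * q + (A / D) * ddy
# The fluctuating lift coefficient is then recovered as Cl = 0.5 * Cl0 * q.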
import SimpleITK as sitk
import numpy as np
import os
import paths
import csv
import math
from scipy.io import loadmat
from skimage.measure import regionprops, marching_cubes_classic, mesh_surface_area
def divide_hcp(connectivity_matrix, hcp_connectivity):
''' divide the connectivity matrix by the hcp matrix'''
assert(connectivity_matrix.shape == hcp_connectivity.shape)
output_matrix = np.zeros(connectivity_matrix.shape)
for i in range(connectivity_matrix.shape[0]):
for j in range(connectivity_matrix.shape[1]):
if hcp_connectivity[i,j] != 0:
output_matrix[i,j] = connectivity_matrix[i,j]/hcp_connectivity[i,j]
return output_matrix
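# A vectorized sketch equivalent to ``divide_hcp``: ``np.divide`` with a
# ``where`` mask skips the zero entries of the HCP matrix, avoiding the
# explicit double loop (left here for reference, not used below).
def divide_hcp_vectorized(connectivity_matrix, hcp_connectivity):
    output_matrix = np.zeros(connectivity_matrix.shape)
    np.divide(connectivity_matrix, hcp_connectivity,
              out=output_matrix, where=hcp_connectivity != 0)
    return output_matrix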
def get_hcp_connectivity_matrice(hcp_connectivity_matrices_path = paths.hcp_connectivity_matrices_path):
'''Get the pass-type and end-type connectivity matrices from HCP1021 subjects'''
end_matrix_path = os.path.join(hcp_connectivity_matrices_path, 'HCP1021.1mm.fib.gz.aal.count.end.connectivity.mat')
pass_matrix_path = os.path.join(hcp_connectivity_matrices_path, 'HCP1021.1mm.fib.gz.aal.count.pass.connectivity.mat')
end_obj = loadmat(end_matrix_path)
end_matrix = end_obj['connectivity']
pass_obj = loadmat(pass_matrix_path)
pass_matrix = pass_obj['connectivity']
return pass_matrix, end_matrix
def ReadImage(path):
''' This code returns the numpy nd array for a MR image at path'''
return sitk.GetArrayFromImage(sitk.ReadImage(path)).astype(np.float32)
def find_list(subject_id, list):
''' this is used to find the stroke lesion for a subject name '''
files = [file for file in list if subject_id in file]
return files[0]
def find_3d_surface(mask, voxel_spacing=(1.0,1.0,1.0)):
''' find the surface for a 3D object '''
verts, faces = marching_cubes_classic(volume=mask, spacing=voxel_spacing)
return mesh_surface_area(verts, faces)
def find_3d_roundness(mask):
''' find the roundess of a 3D object '''
mask_region_props = regionprops(mask.astype(int))
mask_area = mask_region_props[0].area
mask_equivDiameter = (6.0*mask_area/math.pi)**(1.0/3.0)
mask_major_axis_length = mask_region_props[0].major_axis_length
return mask_equivDiameter**2/mask_major_axis_length**2
def reshape_by_padding_upper_coords(image, new_shape, pad_value=None):
''' reshape the 3d matrix '''
shape = tuple(list(image.shape))
new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2,len(shape))), axis=0))
if pad_value is None:
if len(shape)==2:
pad_value = image[0,0]
elif len(shape)==3:
pad_value = image[0, 0, 0]
else:
raise ValueError("Image must be either 2 or 3 dimensional")
res = np.ones(list(new_shape), dtype=image.dtype) * pad_value
if len(shape) == 2:
res[0:0+int(shape[0]), 0:0+int(shape[1])] = image
elif len(shape) == 3:
res[0:0+int(shape[0]), 0:0+int(shape[1]), 0:0+int(shape[2])] = image
return res
# ======================= Tools for connectivity matrix ============================================= #
def threshold_connectivity_matrix(connectivity_matrix, threshold=0.01):
''' threshold the connectiivty matrix in order to remove the noise'''
thresholded_connectivity_matrix= np.copy(connectivity_matrix)
thresholded_connectivity_matrix[connectivity_matrix <= threshold*np.amax(connectivity_matrix)] = 0.0
return thresholded_connectivity_matrix
def weight_conversion(W):
''' convert to the normalized version and binary version'''
W_bin = np.copy(W)
W_bin[W!=0]=1
W_nrm = np.copy(W)
W_nrm = W_nrm/np.amax(np.absolute(W))
return W_nrm, W_bin
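# Worked example for ``weight_conversion`` (values verified by hand):
#   W = np.array([[0., 2.], [-4., 0.]])
#   weight_conversion(W)
#   -> (array([[ 0. ,  0.5], [-1. ,  0. ]]), array([[0., 1.], [1., 0.]]))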
def get_lesion_weights(stroke_mni_path):
''' get the weight vector(workshop paper)'''
aal_path = os.path.join(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')
aal_nda = ReadImage(aal_path)
aal_182_218_182 = reshape_by_padding_upper_coords(aal_nda, (182,218,182), 0)
stroke_mni_nda = ReadImage(stroke_mni_path)
weights = np.zeros(int(np.amax(aal_182_218_182)), dtype=float)
for bp_number in range(int(np.amax(aal_182_218_182))):
mask = np.zeros(aal_182_218_182.shape, aal_182_218_182.dtype)
mask[aal_182_218_182==(bp_number+1)]=1
bp_size = float(np.count_nonzero(mask))
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_in_bp_size = float(np.count_nonzero(stroke_in_bp))
weights[bp_number] = stroke_in_bp_size/bp_size
#weights[bp_number] = stroke_in_bp_size
return weights
def get_modified_lesion_weights(stroke_mni_path):
''' get the modified weight vector'''
aal_path = os.path.join(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')
aal_nda = ReadImage(aal_path)
aal_182_218_182 = reshape_by_padding_upper_coords(aal_nda, (182,218,182), 0)
stroke_mni_nda = ReadImage(stroke_mni_path)
stroke_volume = float(np.count_nonzero(stroke_mni_nda))
weights = np.zeros(int(np.amax(aal_182_218_182)), dtype=float)
for bp_number in range(int(np.amax(aal_182_218_182))):
mask = np.zeros(aal_182_218_182.shape, aal_182_218_182.dtype)
mask[aal_182_218_182==(bp_number+1)]=1
#bp_size = float(np.count_nonzero(mask))
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_volume_in_bp = float(np.count_nonzero(stroke_in_bp))
#weights[bp_number] = 1.0 + stroke_volume_in_bp/stroke_volume
weights[bp_number] = stroke_volume_in_bp/stroke_volume
#remaining_volume = stroke_volume - np.sum(weights)
#print(remaining_volume)
return weights
def get_train_dataset():
'''Give you the training dataset'''
gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
gt_subject_paths.sort()
# The CSV file for train dataset
train_mRS_file = "ISLES2017_Training.csv"
train_mRS_path = os.path.join(paths.isles2017_dir, train_mRS_file)
assert(os.path.isfile(train_mRS_path))
# Read CSV file for Train dataset
train_dataset = {}
with open(train_mRS_path, 'rt') as csv_file:
csv_reader = csv.reader(csv_file)
for line in csv_reader:
if line[2] == '90' or line[2] == '88' or line[2] == '96' or line[2] == '97': # 90 days
subject_name = line[0]
gt_file = [file for file in gt_subject_paths if '/'+subject_name+'/' in file]
if gt_file:
train_dataset[subject_name]={}
train_dataset[subject_name]['mRS'] = line[1]
train_dataset[line[0]]['TICI'] = line[3]
train_dataset[line[0]]['TSS'] = line[4]
train_dataset[line[0]]['TTT'] = line[5]
train_dataset[line[0]]['ID'] = gt_file[0][-10:-4]
train_dataset[line[0]]['tracts'] = line[6]
return train_dataset
# Get the mRS for training subject from training_1 to training_48
def extract_gt_mRS():
'''extract the mRS for training subjects from training_1 to training_48'''
mRS_gt = np.zeros((40, ))
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
mRS_gt[idx] = train_dataset[subject_name]['mRS']
return mRS_gt
def extract_tract_features():
''' extract number of tracts'''
train_dataset = get_train_dataset()
tracts = np.zeros((40, 1))
for idx, subject_name in enumerate(train_dataset.keys()):
tracts[idx] = train_dataset[subject_name]['tracts']
return tracts, ['tracts']
# Extract the volume of stroke in MNI152 space
def extract_volumetric_features():
# The ground truth lesions in MNI space
volumetric_list = ["volume"]
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
assert(len(stroke_mni_paths) == 43)
# Volumetric Features
volumetric_features = np.zeros((40,1))
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
#volumetric features
stroke_mni_nda = ReadImage(stroke_mni_path)
volumetric_features[idx] = np.count_nonzero(stroke_mni_nda)
return volumetric_features, volumetric_list
def extract_spatial_features():
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
assert(len(stroke_mni_paths) == 43)
spatial_list = ["centroid_z", "centroid_y", "centroid_x"]
# Volumetric Features
spatial_features = np.zeros((40,3))
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
stroke_regions = regionprops(stroke_mni_nda.astype(int))
stroke_centroid = stroke_regions[0].centroid
spatial_features[idx, :] = stroke_centroid
return spatial_features, spatial_list
def extract_morphological_features():
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
assert(len(stroke_mni_paths) == 43)
morphological_list = ["major", "minor", "major/minor", "surface", "solidity", "roundness"]
# Volumetric Features
morphological_features = np.zeros((40,6), dtype=np.float32)
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
stroke_regions = regionprops(stroke_mni_nda.astype(int))
stroke_major_axis_length = stroke_regions[0].major_axis_length
stroke_minor_axis_length = stroke_regions[0].minor_axis_length
stroke_surface = find_3d_surface(stroke_mni_nda.astype(int))
stroke_roundness = find_3d_roundness(stroke_mni_nda.astype(int))
morphological_features[idx, :] = stroke_major_axis_length, stroke_minor_axis_length, stroke_major_axis_length/stroke_minor_axis_length, stroke_surface, stroke_regions[0].solidity, stroke_roundness
return morphological_features, morphological_list
def extract_tractographic_features(weight_type, aal_regions=116):
# The ground truth lesion in subject space
gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
# New connectivity matrices location
connectivity_train_dir = os.path.join(paths.dsi_studio_path, 'connectivity', 'gt_stroke')
# pass type locations
connectivity_pass_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'pass' in name and name.endswith('.mat')]
connectivity_pass_files.sort()
# end type locations
connectivity_end_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'end' in name and name.endswith('.mat')]
connectivity_end_files.sort()
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
tractographic_list = ["tract_aal_"+str(i) for i in range(1, aal_regions+1)]
assert(len(connectivity_pass_files) == len(connectivity_end_files) == len(stroke_mni_paths) == 43)
train_dataset = get_train_dataset()
# Tractographic Features
W_dsi_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_nrm_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_bin_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_dsi_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_nrm_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_bin_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
connectivity_pass_file = find_list(subject_id, connectivity_pass_files)
connectivity_pass_obj = loadmat(connectivity_pass_file)
thresholded_connectivity_pass = threshold_connectivity_matrix(connectivity_pass_obj['connectivity'], 0)
W_nrm_pass, W_bin_pass = weight_conversion(thresholded_connectivity_pass)
connectivity_end_file = find_list(subject_id, connectivity_end_files)
connectivity_end_obj = loadmat(connectivity_end_file)
thresholded_connectivity_end = threshold_connectivity_matrix(connectivity_end_obj['connectivity'], 0)
W_nrm_end, W_bin_end = weight_conversion(thresholded_connectivity_end)
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
# =================================== Weight Vector ========================================== #
# Get the lesion weights
if 'ori' in weight_type:
lesion_weights = get_lesion_weights(stroke_mni_path)
# Get the modified lesion weights
if 'mod' in weight_type:
lesion_weights = get_modified_lesion_weights(stroke_mni_path)
# No weight
if 'one' in weight_type:
lesion_weights = np.ones((1,aal_regions), dtype=np.float32)
# weighted connectivity histogram
W_dsi_pass_histogram_features[idx, :] = np.multiply(np.sum(thresholded_connectivity_pass, axis=0), lesion_weights)
W_nrm_pass_histogram_features[idx, :] = np.multiply(np.sum(W_nrm_pass, axis=0), lesion_weights)
W_bin_pass_histogram_features[idx, :] = np.multiply(np.sum(W_bin_pass, axis=0), lesion_weights)
W_dsi_end_histogram_features[idx, :] = np.multiply(np.sum(thresholded_connectivity_end, axis=0), lesion_weights)
W_nrm_end_histogram_features[idx, :] = np.multiply(np.sum(W_nrm_end, axis=0), lesion_weights)
W_bin_end_histogram_features[idx, :] = np.multiply(np.sum(W_bin_end, axis=0), lesion_weights)
return W_dsi_pass_histogram_features, W_nrm_pass_histogram_features, W_bin_pass_histogram_features, W_dsi_end_histogram_features, W_nrm_end_histogram_features, W_bin_end_histogram_features, tractographic_list
def extract_volumetric_spatial_features(atlas_name):
'''extract volumetric spatial features'''
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
train_dataset = get_train_dataset()
atlas_path = os.path.join(paths.dsi_studio_path, 'atlas', atlas_name+'.nii.gz')
atlas_nda = ReadImage(atlas_path)
if atlas_name == 'aal':
atlas_nda = reshape_by_padding_upper_coords(atlas_nda, (182,218,182), 0)
volumetric_spatial_features = np.zeros((40, int(np.amax(atlas_nda))+1), dtype=float)
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
whole_stroke_volume = float(np.count_nonzero(stroke_mni_nda))
for bp_number in range(1, int(np.amax(atlas_nda)+1)):
mask = np.zeros(atlas_nda.shape, atlas_nda.dtype)
mask[atlas_nda==(bp_number)]=1
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_in_bp_volume = np.count_nonzero(stroke_in_bp)
volumetric_spatial_features[idx, bp_number] = stroke_in_bp_volume
total_stroke_volume_bp = np.sum(volumetric_spatial_features[idx, :])
volumetric_spatial_features[idx, 0] = whole_stroke_volume - total_stroke_volume_bp
volumetric_spatial_list =['volume_'+atlas_name+'_'+str(i) for i in range(0, int(np.amax(atlas_nda)+1))]
return volumetric_spatial_features, volumetric_spatial_list
def extract_modified_volumetric_spatial_features(atlas_name):
'''extract volumetric spatial features considering the total volume of the stroke lesion'''
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
train_dataset = get_train_dataset()
atlas_path = os.path.join(paths.dsi_studio_path, 'atlas', atlas_name+'.nii.gz')
atlas_nda = ReadImage(atlas_path)
if atlas_name == 'aal':
atlas_nda = reshape_by_padding_upper_coords(atlas_nda, (182,218,182), 0)
modified_volumetric_spatial_features = np.zeros((40, int(np.amax(atlas_nda))), dtype=float)
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
whole_stroke_volume = float(np.count_nonzero(stroke_mni_nda))
for bp_number in range(1, int(np.amax(atlas_nda))+1):
mask = np.zeros(atlas_nda.shape, atlas_nda.dtype)
mask[atlas_nda==(bp_number)]=1
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_in_bp_volume = float(np.count_nonzero(stroke_in_bp))
modified_volumetric_spatial_features[idx, bp_number-1] = stroke_in_bp_volume / whole_stroke_volume
volumetric_spatial_list =['volume_'+atlas_name+'_'+str(i) for i in range(1, int(np.amax(atlas_nda))+1)]
assert((len(volumetric_spatial_list))==modified_volumetric_spatial_features.shape[1])
return modified_volumetric_spatial_features, volumetric_spatial_list
def extract_new_tractographic_features(weight_type, aal_regions=116):
# The ground truth lesion in subject space
gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
# New connectivity matrices location
connectivity_train_dir = os.path.join(paths.dsi_studio_path, 'connectivity', 'gt_stroke')
# pass type locations
connectivity_pass_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'pass' in name and name.endswith('.mat')]
connectivity_pass_files.sort()
# end type locations
connectivity_end_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'end' in name and name.endswith('.mat')]
connectivity_end_files.sort()
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
tractographic_list = ["tract_aal_"+str(i) for i in range(1, aal_regions+1)]
assert(len(connectivity_pass_files) == len(connectivity_end_files) == len(stroke_mni_paths) == 43)
train_dataset = get_train_dataset()
# Tractographic Features
W_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
for idx, subject_name in enumerate(train_dataset.keys()):
HCP_pass, HCP_end = get_hcp_connectivity_matrice()
subject_id = train_dataset[subject_name]['ID']
connectivity_pass_file = find_list(subject_id, connectivity_pass_files)
connectivity_pass_obj = loadmat(connectivity_pass_file)
connectivity_pass_matrix = connectivity_pass_obj['connectivity']
#normalized_pass_matrix = divide_hcp(connectivity_pass_matrix, HCP_pass)
connectivity_end_file = find_list(subject_id, connectivity_end_files)
connectivity_end_obj = loadmat(connectivity_end_file)
connectivity_end_matrix = connectivity_end_obj['connectivity']
#normalized_end_matrix = divide_hcp(connectivity_pass_matrix, HCP_end)
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
# =================================== Weight Vector ========================================== #
# Get the lesion weights
if 'ori' in weight_type:
lesion_weights = get_lesion_weights(stroke_mni_path)
# Get the modified lesion weights
if 'mod' in weight_type:
lesion_weights = get_modified_lesion_weights(stroke_mni_path)
# No weight
if 'one' in weight_type:
lesion_weights = np.ones((1,aal_regions), dtype=np.float32)
normalized_pass_matrix = np.divide(np.sum(connectivity_pass_matrix, axis=0), np.sum(HCP_pass, axis=0))
normalized_end_matrix = np.divide(np.sum(connectivity_end_matrix, axis=0), np.sum(HCP_end, axis=0))
# weighted connectivity histogram
        W_pass_histogram_features[idx, :] = np.multiply(normalized_pass_matrix, lesion_weights)
        W_end_histogram_features[idx, :] = np.multiply(normalized_end_matrix, lesion_weights)
    return W_pass_histogram_features, W_end_histogram_features, tractographic_list
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import VyPy
from VyPy.data import ibunch
from VyPy.optimize.drivers import Driver
import numpy as np
from time import time
from VyPy.exceptions import MaxEvaluations
try:
import scipy
import scipy.optimize
except ImportError:
pass
# ----------------------------------------------------------------------
# Sequential Least Squares Quadratic Programming
# ----------------------------------------------------------------------
class SLSQP(Driver):
def __init__(self):
''' see http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_slsqp.html for more info
'''
# import check
import scipy.optimize
Driver.__init__(self)
self.verbose = True
self.max_iterations = 1000
self.max_evaluations = 10000
self.objective_accuracy = None
def run(self,problem):
# store the problem
self.problem = problem
# cache
self._current_x = None
self._current_eval = 0
# single objective
assert len(problem.objectives) == 1 , 'too many objectives'
# optimizer
import scipy.optimize
optimizer = scipy.optimize.fmin_slsqp
# inputs
func = self.func
x0 = problem.variables.scaled.initials_array()
f_eqcons = self.f_eqcons
f_ieqcons = self.f_ieqcons
bounds = problem.variables.scaled.bounds_array()
fprime = self.fprime
fprime_ieqcons = self.fprime_ieqcons
fprime_eqcons = self.fprime_eqcons
iprint = 2
iters = self.max_iterations
accuracy = self.objective_accuracy or 1e-6
## objective scaling
#accuracy = accuracy / problem.objective.scale
# printing
if not self.verbose: iprint = 0
# constraints?
if not problem.constraints.inequalities: f_ieqcons = None
if not problem.constraints.equalities: f_eqcons = None
# gradients?
dobj,dineq,deq = problem.has_gradients()
if not dobj: fprime = None
if not (f_ieqcons and dineq): fprime_ieqcons = None
if not (f_eqcons and deq) : fprime_eqcons = None
# for catching max_evaluations
self._current_x = x0
# start timing
tic = time()
# run the optimizer
try: # for catching custom exits
x_min,f_min,its,imode,smode = optimizer(
func = func ,
x0 = x0 ,
f_eqcons = f_eqcons ,
f_ieqcons = f_ieqcons ,
bounds = bounds ,
fprime = fprime ,
fprime_ieqcons = fprime_ieqcons ,
fprime_eqcons = fprime_eqcons ,
iprint = iprint ,
full_output = True ,
iter = iters ,
acc = accuracy ,
**self.other_options.to_dict()
)
except MaxEvaluations:
its = None # can't know major iterations unless gradients are provided
imode = 10 # custom mode number
smode = 'Evaluation limit exceeded'
x_min = self._current_x
## TODO - check constraints are met to tolerance, scipy doesn't do this
# stop timing
toc = time() - tic
# get final variables
vars_min = self.problem.variables.scaled.unpack_array(x_min)
# pack outputs
outputs = self.pack_outputs(vars_min)
outputs.success = imode == 0
outputs.messages.exit_flag = imode
outputs.messages.exit_message = smode
outputs.messages.iterations = its
outputs.messages.evaluations = self._current_eval
outputs.messages.run_time = toc
# done!
return outputs
def func(self,x):
# check number of evaluations
max_eval = self.max_evaluations
if max_eval and max_eval>0 and self._current_eval >= max_eval:
raise MaxEvaluations
self._current_x = x
self._current_eval += 1
# evaluate the objective function
objective = self.problem.objectives[0]
result = objective.function(x)
result = result[0,0]
return result
def f_ieqcons(self,x):
inequalities = self.problem.inequalities
result = [ -1.*inequality.function(x) for inequality in inequalities ]
        result = np.vstack(result)
        return result
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import os
from pathlib import Path
import pickle
import warnings
class Trace:
"""
Stores the logs of running an optimization method
and plots the trajectory.
Arguments:
loss (Oracle): the optimized loss class
label (string, optional): label for convergence plots (default: None)
"""
def __init__(self, loss, label=None):
self.loss = loss
self.label = label
self.xs = []
self.ts = []
self.its = []
self.loss_vals = []
self.its_converted_to_epochs = False
self.ls_its = None
def compute_loss_of_iterates(self):
if len(self.loss_vals) == 0:
self.loss_vals = np.asarray([self.loss.value(x) for x in self.xs])
else:
warnings.warn('Loss values have already been computed. Set .loss_vals = [] to recompute.')
def convert_its_to_epochs(self, batch_size=1):
if self.its_converted_to_epochs:
warnings.warn('The iteration count has already been converted to epochs.')
return
its_per_epoch = self.loss.n / batch_size
self.its = np.asarray(self.its) / its_per_epoch
self.its_converted_to_epochs = True
def plot_losses(self, its=None, f_opt=None, label=None, markevery=None, ls_its=True, time=False, *args, **kwargs):
if its is None:
if ls_its and self.ls_its is not None:
its = self.ls_its
elif time:
its = self.ts
else:
its = self.its
if len(self.loss_vals) == 0:
self.compute_loss_of_iterates()
if f_opt is None:
f_opt = self.loss.f_opt
if label is None:
label = self.label
if markevery is None:
markevery = max(1, len(self.loss_vals)//20)
plt.plot(its, self.loss_vals - f_opt, label=label, markevery=markevery, *args, **kwargs)
plt.ylabel(r'$f(x)-f^*$')
def plot_distances(self, its=None, x_opt=None, label=None, markevery=None, ls_its=True, time=False, *args, **kwargs):
if its is None:
if ls_its and self.ls_its is not None:
its = self.ls_its
elif time:
its = self.ts
else:
its = self.its
if x_opt is None:
if self.loss.x_opt is None:
x_opt = self.xs[-1]
else:
x_opt = self.loss.x_opt
if label is None:
label = self.label
if markevery is None:
markevery = max(1, len(self.xs)//20)
dists = [self.loss.norm(x-x_opt)**2 for x in self.xs]
plt.plot(its, dists, label=label, markevery=markevery, *args, **kwargs)
plt.ylabel(r'$\Vert x-x^*\Vert^2$')
@property
def best_loss_value(self):
if len(self.loss_vals) == 0:
self.compute_loss_of_iterates()
return np.min(self.loss_vals)
def save(self, file_name, path='./results/'):
# To make the dumped file smaller, remove the loss
loss_ref_copy = self.loss
self.loss = None
Path(path).mkdir(parents=True, exist_ok=True)
with open(path + file_name, 'wb') as f:
pickle.dump(self, f)
self.loss = loss_ref_copy
@classmethod
def from_pickle(cls, path, loss=None):
if not os.path.isfile(path):
return None
with open(path, 'rb') as f:
trace = pickle.load(f)
trace.loss = loss
if loss is not None:
            loss.f_opt = min(trace.best_loss_value, loss.f_opt)
return trace
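# A usage sketch for ``Trace`` (hypothetical optimizer loop; any Oracle with
# the ``.value()``/``.norm()`` interface used above works; ``step`` and the
# timer are assumptions for illustration):
#   trace = Trace(loss, label='GD')
#   for it in range(n_iters):
#       x = step(x)                      # one optimizer update
#       trace.xs.append(x.copy())
#       trace.its.append(it)
#       trace.ts.append(time.perf_counter())
#   trace.compute_loss_of_iterates()
#   trace.plot_losses()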
class StochasticTrace:
"""
Class that stores the logs of running a stochastic
optimization method and plots the trajectory.
"""
def __init__(self, loss, label=None):
self.loss = loss
self.label = label
self.xs_all = {}
self.ts_all = {}
self.its_all = {}
self.loss_vals_all = {}
self.its_converted_to_epochs = False
self.loss_is_computed = False
def init_seed(self):
self.xs = []
self.ts = []
self.its = []
self.loss_vals = None
def append_seed_results(self, seed):
self.xs_all[seed] = self.xs.copy()
self.ts_all[seed] = self.ts.copy()
self.its_all[seed] = self.its.copy()
self.loss_vals_all[seed] = self.loss_vals.copy() if self.loss_vals else None
def compute_loss_of_iterates(self):
for seed, loss_vals in self.loss_vals_all.items():
if loss_vals is None:
self.loss_vals_all[seed] = np.asarray([self.loss.value(x) for x in self.xs_all[seed]])
else:
warnings.warn("""Loss values for seed {} have already been computed.
Set .loss_vals_all[{}] = [] to recompute.""".format(seed, seed))
self.loss_is_computed = True
@property
def best_loss_value(self):
if not self.loss_is_computed:
self.compute_loss_of_iterates()
return np.min([np.min(loss_vals) for loss_vals in self.loss_vals_all.values()])
def convert_its_to_epochs(self, batch_size=1):
if self.its_converted_to_epochs:
return
self.its_per_epoch = self.loss.n / batch_size
for seed, its in self.its_all.items():
self.its_all[seed] = np.asarray(its) / self.its_per_epoch
self.its = np.asarray(self.its) / self.its_per_epoch
self.its_converted_to_epochs = True
def plot_losses(self, its=None, f_opt=None, log_std=True, label=None, markevery=None, alpha=0.25, *args, **kwargs):
if not self.loss_is_computed:
self.compute_loss_of_iterates()
if its is None:
its = np.mean([np.asarray(its_) for its_ in self.its_all.values()], axis=0)
if f_opt is None:
f_opt = self.loss.f_opt
if log_std:
y_log = [np.log(loss_vals-f_opt) for loss_vals in self.loss_vals_all.values()]
y_log_ave = np.mean(y_log, axis=0)
y_log_std = np.std(y_log, axis=0)
lower, upper = np.exp(y_log_ave - y_log_std), np.exp(y_log_ave + y_log_std)
y_ave = np.exp(y_log_ave)
else:
y = [loss_vals-f_opt for loss_vals in self.loss_vals_all.values()]
y_ave = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
lower, upper = y_ave - y_std, y_ave + y_std
if label is None:
label = self.label
if markevery is None:
markevery = max(1, len(y_ave)//20)
plot = plt.plot(its, y_ave, label=label, markevery=markevery, *args, **kwargs)
if len(self.loss_vals_all.keys()) > 1:
plt.fill_between(its, lower, upper, alpha=alpha, color=plot[0].get_color())
plt.ylabel(r'$f(x)-f^*$')
def plot_distances(self, its=None, x_opt=None, log_std=True, label=None, markevery=None, alpha=0.25, *args, **kwargs):
if its is None:
its = np.mean([np.asarray(its_) for its_ in self.its_all.values()], axis=0)
if x_opt is None:
if self.loss.x_opt is None:
x_opt = self.xs[-1]
else:
x_opt = self.loss.x_opt
dists = [np.asarray([self.loss.norm(x-x_opt)**2 for x in xs]) for xs in self.xs_all.values()]
if log_std:
y_log = [np.log(dist) for dist in dists]
y_log_ave = np.mean(y_log, axis=0)
            y_log_std = np.std(y_log, axis=0)
            lower, upper = np.exp(y_log_ave - y_log_std), np.exp(y_log_ave + y_log_std)
            y_ave = np.exp(y_log_ave)
        else:
            y_ave = np.mean(dists, axis=0)
            y_std = np.std(dists, axis=0)
            lower, upper = y_ave - y_std, y_ave + y_std
        if label is None:
            label = self.label
        if markevery is None:
            markevery = max(1, len(y_ave)//20)
        plot = plt.plot(its, y_ave, label=label, markevery=markevery, *args, **kwargs)
        if len(self.xs_all.keys()) > 1:
            plt.fill_between(its, lower, upper, alpha=alpha, color=plot[0].get_color())
        plt.ylabel(r'$\Vert x-x^*\Vert^2$')
import logging
import numpy as np
import cv2
def bbox2roi(bbox):
'''
Args: [x1, y1, width, height]
Returns: [y1, x1, y2, x2]
'''
if not (isinstance(bbox, (list, tuple))):
raise TypeError('Need a list of a tuple, got %s' % type(bbox))
if not len(bbox) == 4:
raise ValueError('Need 4 numbers, not %d.' % len(bbox))
for x in bbox:
if not (isinstance(x, (int, float))):
raise TypeError('Each element must be a number, got %s' % type(x))
if bbox[2] < 0 or bbox[3] < 0:
raise ValueError('Bbox %s has negative width or height.' % str(bbox))
return [bbox[1], bbox[0], bbox[3] + bbox[1], bbox[2] + bbox[0]]
def roi2bbox(roi):
'''
Args: [y1, x1, y2, x2]
Returns: [x1, y1, width, height]
'''
if not (isinstance(roi, list) or isinstance(roi, tuple)):
raise TypeError('Need a list of a tuple, got %s' % type(roi))
if not len(roi) == 4:
raise ValueError('Need 4 numbers, not %d.' % len(roi))
for x in roi:
if not (isinstance(x, (int, float))):
raise TypeError('Each element must be a number, got %s' % type(x))
if roi[2] < roi[0] or roi[3] < roi[1]:
raise ValueError('Roi %s has negative width or height.' % str(roi))
return [roi[1], roi[0], roi[3] - roi[1], roi[2] - roi[0]]
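# Round-trip example for the two box conventions (values verified by hand):
#   bbox2roi([10, 20, 30, 40]) -> [20, 10, 60, 40]   # [x1,y1,w,h] -> [y1,x1,y2,x2]
#   roi2bbox([20, 10, 60, 40]) -> [10, 20, 30, 40]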
def getIoU(roi1, roi2):
' Computes intersection over union for two rectangles. '
intersection_y = max(0, (min(roi1[2], roi2[2]) - max(roi1[0], roi2[0])))
intersection_x = max(0, (min(roi1[3], roi2[3]) - max(roi1[1], roi2[1])))
intersection = intersection_x * intersection_y
area1 = (roi1[3] - roi1[1]) * (roi1[2] - roi1[0])
area2 = (roi2[3] - roi2[1]) * (roi2[2] - roi2[0])
union = area1 + area2 - intersection
IoU = intersection / union if union > 0 else 0.
return IoU
def expandRoiBorder(roi, imsize, perc, integer_result=True):
'''
Expands a ROI, and clips it within borders.
Floats are rounded to the nearest integer.
'''
imheight, imwidth = imsize
perc_y, perc_x = perc
if (perc_y, perc_x) == (0, 0): return roi
half_delta_y = float(roi[2] + 1 - roi[0]) * perc_y / 2
half_delta_x = float(roi[3] + 1 - roi[1]) * perc_x / 2
# the result must be within (imheight, imwidth)
bbox_height = roi[2] + 1 - roi[0] + half_delta_y * 2
bbox_width = roi[3] + 1 - roi[1] + half_delta_x * 2
if bbox_height > imheight or bbox_width > imwidth:
logging.warning(
'expanded bbox of size (%d,%d) does not fit into image (%d,%d)' %
(bbox_height, bbox_width, imheight, imwidth))
# if so, decrease half_delta_y, half_delta_x
coef = min(imheight / bbox_height, imwidth / bbox_width)
logging.warning('decreased bbox to (%d,%d)' %
(bbox_height, bbox_width))
bbox_height *= coef
bbox_width *= coef
logging.warning('decreased bbox to (%d,%d)' %
(bbox_height, bbox_width))
half_delta_y = (bbox_height - (roi[2] + 1 - roi[0])) * 0.5
half_delta_x = (bbox_width - (roi[3] + 1 - roi[1])) * 0.5
# and a small epsilon to account for floating-point imprecisions
EPS = 0.001
# expand each side
roi[0] -= (half_delta_y - EPS)
roi[1] -= (half_delta_x - EPS)
roi[2] += (half_delta_y - EPS)
roi[3] += (half_delta_x - EPS)
# move to clip into borders
if roi[0] < 0:
roi[2] += abs(roi[0])
roi[0] = 0
if roi[1] < 0:
roi[3] += abs(roi[1])
roi[1] = 0
if roi[2] > imheight - 1:
roi[0] -= abs((imheight - 1) - roi[2])
roi[2] = imheight - 1
if roi[3] > imwidth - 1:
roi[1] -= abs((imwidth - 1) - roi[3])
roi[3] = imwidth - 1
# check that now averything is within borders (bbox is not too big)
assert roi[0] >= 0 and roi[1] >= 0, str(roi)
assert roi[2] <= imheight - 1 and roi[3] <= imwidth - 1, str(roi)
# make integer
if integer_result:
roi = [int(round(x)) for x in roi]
return roi
def expandRoi(roi, perc, integer_result=True):
''' Expands a ROI. Floats are rounded to the nearest integer. '''
roi = list(roi)
perc_y, perc_x = perc
if (perc_y, perc_x) == (0, 0): return roi
half_delta_y = float(roi[2] + 1 - roi[0]) * perc_y / 2
half_delta_x = float(roi[3] + 1 - roi[1]) * perc_x / 2
# and a small epsilon to account for floating-point imprecisions
EPS = 0.001
# expand each side
roi[0] -= (half_delta_y - EPS)
roi[1] -= (half_delta_x - EPS)
roi[2] += (half_delta_y - EPS)
roi[3] += (half_delta_x - EPS)
# make integer
if integer_result:
roi = [int(round(x)) for x in roi]
return roi
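# Example: expanding a 10x10 roi by 100% in each direction doubles each side
# around the same centre (verified by hand):
#   expandRoi([10, 10, 19, 19], perc=(1.0, 1.0)) -> [5, 5, 24, 24]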
def expandPolygon(xs, ys, perc, integer_result=True):
'''
Expand polygon from its median center in all directions.
Floating-point numbers are then rounded to the nearest integer.
Args:
xs: A list of x values.
ys: A list of y values.
perc: A tuple of (perc_y, perc_x). Both values are float from -1 to inf.
Returns:
xs: A list of x values.
ys: A list of y values.
'''
perc_y, perc_x = perc
center_x = np.array(xs, dtype=float).mean()
    center_y = np.array(ys, dtype=float).mean()
    xs = [center_x + (x - center_x) * (1 + perc_x) for x in xs]
    ys = [center_y + (y - center_y) * (1 + perc_y) for y in ys]
    if integer_result:
        xs = [int(round(x)) for x in xs]
        ys = [int(round(y)) for y in ys]
    return xs, ys
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Dependencies
# Created by wrborrelli
# %%
import sys
import itertools
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
from scipy.optimize import linprog
from scipy.optimize import lsq_linear
from scipy.spatial import HalfspaceIntersection
import numpy as np
from numpy.linalg import lstsq
import pint
from pint import UnitRegistry
units = UnitRegistry()
Q_ = units.Quantity
# %% [markdown]
# # Sample Inputs
# %%
reagent1= {'Gamma-Butyrolactone': '0.0 M'}
reagent2= {'Lead Diiodide': '1.81 M',
'Formamidinium Iodide': '1.36 M',
'Gamma-Butyrolactone': '0.0 M'}
reagent3 ={'Formamidinium Iodide': '2.63 M',
'Gamma-Butyrolactone': '0.0 M'}
reagent7= {'Formic Acid': '26.5 M'}
reagents = [reagent1, reagent2, reagent3, reagent7]
descriptions=['Reagent1', 'Reagent2', 'Reagent3', 'Reagent7']
# %%
nExpt = 96
maxMolarity = 15.0
finalVolume = '500 ul'
# %% [markdown]
# ## Helper Functions
# %%
def get_hull_centroid(hull: ConvexHull):
""" Returns the centroid of the supplied scipy convex hull object.
Args:
hull: a scipy convex hull object
Returns:
np.array() of centroids for each axis.
>>> get_hull_centroid(ConvexHull(np.array([[1.8, 0., 0.],[0. , 0., 0.], [0., 0., 26.5], [0., 2., 1.]])))
array([0.45, 0.5, 6.875])
"""
    return np.array([np.mean(hull.points[hull.vertices, i]) for i in range(hull.points.shape[1])])
# %%
def convex_hull_intersection(points1: np.ndarray, points2: np.ndarray, vis2d=False):
""" Returns the points corresponding to the intersecting region of two convex hulls (up to however many dimensions scipy ConvexHull takes (9D I think).
Args:
points1: np.array() of points corresponding to the first convex hull.
points2: np.array() of points corresponding to the second convex hull.
vis2d: True/False if you want to visualize the resulting region (2D hulls only)
Returns:
np.array() of points corresponding to the intersection region.
"""
assert points1.shape[1] == points2.shape[1]
hull1 = ConvexHull(points1)
hull2 = ConvexHull(points2)
A = np.vstack((hull1.equations[:, :-1], hull2.equations[:, :-1]))
b = np.hstack((hull1.equations[:, -1], hull2.equations[:, -1]))
res = linprog(c=np.zeros(A.shape[1]), A_ub=A, b_ub=-b, method="interior-point")
feasible_point = res.x
hint = HalfspaceIntersection(np.vstack((hull1.equations, hull2.equations)), feasible_point)
if vis2d:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, aspect='equal')
xlim, ylim = (0, 1), (0, 1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
for simplex in hull1.simplices:
ax.plot(points1[simplex, 0], points1[simplex, 1], 'r-')
for simplex in hull2.simplices:
ax.plot(points2[simplex, 0], points2[simplex, 1], 'b-')
x, y = zip(*hint.intersections)
ax.plot(x, y, 'k^', markersize=8)
plt.savefig("{}".format(__file__).replace(".py", ".png"))
return hint.intersections
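# Example sketch: the intersection of two overlapping unit squares is the
# square [0.5, 1]^2, returned as its corner points (vertex order not
# guaranteed):
#   sq1 = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
#   sq2 = sq1 + 0.5
#   convex_hull_intersection(sq1, sq2)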
# %%
def bb_to_cds(bb):
""" Converts bounding box notation to vertex coordinates.
Args:
bb: list of lists of bounding box notation [[mins], [maxes]] corresponding to the mins and maxes for each axis.
Returns:
np.array() of points corresponding to the vertices of the bounding box.
"""
outp = list(itertools.product(*bb))
out = []
for i in outp:
temp = []
for j in i:
if isinstance(j, list):
for k in j:
temp.append(k)
else:
temp.append(j)
out.append(temp)
return np.array(out)
# %%
def bb_cds(corners):
""" Returns a list of the mins and maxes for each axis in a bounding box, given the corners of concentration space.
Args:
corners: np.array() of points corresponding to the corners of the concentration space.
Returns:
list of [[mins], [maxes]] defining the resulting bounding box.
"""
tarray = np.transpose(corners)
maxes = [max(tarray[i]) for i in range(len(corners[0]))]
mins = [min(tarray[i]) for i in range(len(corners[0]))]
return [mins, maxes]
# %%
def box_int(b1, b2):
""" Gets the coordinates of the overlapping region for two n-dimensional hypercubes.
Args:
b1: list of lists in bounding box notation [[mins], [maxes]] for each axis of box 1
b2: list of lists in bounding box notation [[mins], [maxes]] for each axis of box 2
Returns:
List of the coordinates for the overlapping region (returns 0 for no overlap).
"""
olap_cds = []
for i in range(len(b1.T)):
        if b1.T[i][0] > b2.T[i][0]:
            b1, b2 = b2, b1
if ((b1.T[i][0] <= b1.T[i][1]) and (b1.T[i][1] <= b2.T[i][0]) and (b2.T[i][0] <= b2.T[i][1])):
return 0
elif ((b1.T[i][0] <= b2.T[i][0]) and (b2.T[i][0] <= b1.T[i][1]) and (b1.T[i][1] <= b2.T[i][1])):
olap_cds.append([b2.T[i][0], b1.T[i][1]])
elif ((b1.T[i][0] <= b2.T[i][0]) and (b2.T[i][0] <= b2.T[i][1]) and (b2.T[i][1] <= b1.T[i][1])):
olap_cds.append([b2.T[i][0], b2.T[i][1]])
return olap_cds
# %%
def in_hull(points, x):
""" Tests if a point is inside the ConvexHull given by points.
Args:
points: np.array() of points defining the ConvexHull.
x: point to be tested for inclusion in the Convexhull.
Returns:
True: point is inside hull.
False: point is not inside hull
"""
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
    b = np.r_[x, np.ones(1)]
    lp = linprog(c, A_eq=A, b_eq=b)
    return lp.success
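# Example: points inside/outside the unit triangle (linear-program based
# membership test, no explicit hull construction needed):
#   tri = np.array([[0., 0.], [1., 0.], [0., 1.]])
#   in_hull(tri, np.array([0.25, 0.25]))   # -> True
#   in_hull(tri, np.array([1.0, 1.0]))     # -> False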
##
# \file simple_itk_helper.py
# \brief Utility functions associated to SimpleITK and ITK
#
# \author <NAME> (<EMAIL>)
# \date September 2015
#
import re
import os
import itk
import six
import fnmatch
import datetime
import subprocess
import numpy as np
import nibabel as nib
import SimpleITK as sitk
import matplotlib.pyplot as plt
import pysitk.python_helper as ph
from pysitk.definitions import VIEWER
from pysitk.definitions import DIR_TMP
from pysitk.definitions import ITKSNAP_EXE, FSLVIEW_EXE, NIFTYVIEW_EXE
# Use ITK-SNAP instead of imageJ to view images
os.environ['SITK_SHOW_COMMAND'] = ITKSNAP_EXE
TRANSFORM_SITK_DOF_LABELS_LONG = {
6: ["angle_x [rad]",
"angle_y [rad]",
"angle_z [rad]",
"t_x [mm]",
"t_y [mm]",
"t_z [mm]"],
}
TRANSFORM_SITK_DOF_LABELS_SHORT = {
6: ["angle_x",
"angle_y",
"angle_z",
"t_x",
"t_y",
"t_z"],
}
##
# Get composite transform of two affine/euler sitk transforms
# \see http://insightsoftwareconsortium.github.io/SimpleITK-Notebooks/Python_html/22_Transforms.html
# \date 2017-08-14 11:51:32+0100
#
# \param transform_outer The transform outer
# \param transform_inner The transform inner
#
# \return The composite sitk affine/euler transform.
#
def get_composite_sitk_affine_transform(transform_outer, transform_inner):
dim = transform_outer.GetDimension()
A_inner = np.asarray(transform_inner.GetMatrix()).reshape(dim, dim)
c_inner = np.asarray(transform_inner.GetCenter())
t_inner = np.asarray(transform_inner.GetTranslation())
A_outer = np.asarray(transform_outer.GetMatrix()).reshape(dim, dim)
c_outer = np.asarray(transform_outer.GetCenter())
t_outer = np.asarray(transform_outer.GetTranslation())
A_composite = A_outer.dot(A_inner)
c_composite = c_inner
t_composite = A_outer.dot(
t_inner + c_inner - c_outer) + t_outer + c_outer - c_inner
if transform_outer.GetName() == "AffineTransform" \
or transform_inner.GetName() == "AffineTransform" \
or transform_outer.GetName() != transform_inner.GetName():
trafo = sitk.AffineTransform(dim)
else:
trafo = eval("sitk." + transform_outer.GetName() + "()")
trafo.SetMatrix(A_composite.flatten())
trafo.SetTranslation(t_composite)
trafo.SetCenter(c_composite)
return trafo
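# Quick numerical check of the composition formula above (sketch): the
# composite transform must act like applying the inner transform first,
# then the outer one.
#   inner = sitk.AffineTransform(3)
#   inner.SetTranslation((1., 2., 3.))
#   outer = sitk.AffineTransform(3)
#   outer.SetMatrix(tuple(np.diag([2., 2., 2.]).flatten()))
#   comp = get_composite_sitk_affine_transform(outer, inner)
#   p = (0.5, -1.0, 4.0)
#   assert np.allclose(comp.TransformPoint(p),
#                      outer.TransformPoint(inner.TransformPoint(p)))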
##
# Composite two Euler Transforms
# \param[in] transform_outer as sitk::simple::EulerxDTransform
# \param[in] transform_inner as sitk::simple::EulerxDTransform
# \return \p tranform_outer
# \f$ \circ
# \f$ \p transform_inner as sitk.EulerxDTransform
# \see http://insightsoftwareconsortium.github.io/SimpleITK-Notebooks/22_Transforms.html
#
def get_composite_sitk_euler_transform(transform_outer, transform_inner):
# Guarantee type sitk::simple::AffineTransform of transformations
# transform_outer = sitk.AffineTransform(transform_outer)
# transform_inner = sitk.AffineTransform(transform_inner)
dim = transform_outer.GetDimension()
A_inner = np.asarray(transform_inner.GetMatrix()).reshape(dim, dim)
c_inner = np.asarray(transform_inner.GetCenter())
t_inner = np.asarray(transform_inner.GetTranslation())
A_outer = np.asarray(transform_outer.GetMatrix()).reshape(dim, dim)
c_outer = np.asarray(transform_outer.GetCenter())
t_outer = np.asarray(transform_outer.GetTranslation())
A_composite = A_outer.dot(A_inner)
c_composite = c_inner
t_composite = A_outer.dot(
t_inner + c_inner - c_outer) + t_outer + c_outer - c_inner
euler = eval("sitk." + transform_outer.GetName() + "()")
euler.SetMatrix(A_composite.flatten())
euler.SetTranslation(t_composite)
euler.SetCenter(c_composite)
return euler
##
# Get direction for sitk.Image object from sitk.AffineTransform instance. The
# information of the image is required to extract spacing information and
# associated image dimension
# \param[in] affine_transform_sitk sitk.AffineTransform instance
# \param[in] image_or_spacing_sitk provide entire image as sitk object or
# spacing directly
# \return image direction which can be used to update the sitk.Image via
# image_sitk.SetDirection(direction)
#
def get_sitk_image_direction_from_sitk_affine_transform(affine_transform_sitk,
image_or_spacing_sitk):
dim = affine_transform_sitk.GetDimension()
try:
spacing_sitk = np.array(image_or_spacing_sitk.GetSpacing())
except:
spacing_sitk = np.array(image_or_spacing_sitk)
    S_inv_sitk = np.diag(1 / spacing_sitk)
    A = np.asarray(affine_transform_sitk.GetMatrix()).reshape(dim, dim)
    return (A.dot(S_inv_sitk)).flatten()
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SO(8) gauged D=4 supergravity.
Usage: python3 -i -m dim4.so8.src.analysis
"""
# Naming deviates from PEP-8 conventions where this makes mathematics easier
# to read. Also, local variables may name-match module-global definitions.
# pylint:disable=invalid-name
# pylint:disable=redefined-outer-name
import collections
import itertools
import os
from dim4.generic import a123
from m_theory_lib import algebra
from m_theory_lib import m_util as mu
from m_theory_lib import supergravity
import numpy
import tensorflow as tf
### Supergravity ###
class SO8_SUGRA(supergravity.SUGRA):
"""D=4 SO(8) Supergravity.
In addition to the base class attributes, this class adds...:
Attributes:
e7: The e7 algebra that was used to define this supergravity.
"""
signature = supergravity.SUGRASignature(
name='SO8',
gauge_algebra_name='so(8)',
dim=4,
generator_scaling=+1,
dim_scalar_manifold=70,
num_model_params=1, # The 'dyonic angle'.
scalar_masses_dV_from_right=False,
scalar_masses_factor=36.0,
gravitino_masses_factor=6.0,
fermion_masses_factor=6.0,
vector_masses_factor=6.0,
num_spurious_vector_masses=28)
def __init__(self,
use_dwn_stationarity=True,
e7=algebra.g.e7,
squash_stationarity_tf_func=tf.math.asinh,
**kwargs):
"""Initializes the instance.
Args:
use_dwn_stationarity: Whether to use the de A1/A2 formula from the
de Wit-Nicolai 'SO(8) Supergravity' paper to compute the
stationarity-violation (rather than taking the naive gradient-squared).
e7: The e7 algebra to use.
squash_stationarity_tf_func: Optional 'squashing' function that is used
to squash the stationarity-violation ([]-tf.Tensor -> []-tf.Tensor).
**kwargs: keyword parameters to be passed on to superclass __init__().
"""
super().__init__(e7.t_a_ij_kl,
squash_stationarity_tf_func=squash_stationarity_tf_func,
**kwargs)
self._use_dwn_stationarity = use_dwn_stationarity
self.e7 = e7
self._tc_28_8_8 = tf.constant(
e7.su8.m_28_8_8.astype(numpy.complex128),
dtype=tf.complex128)
def _expand_ijkl(self, t_ab):
"""Index-expands 28, 28 -> [8, 8] [8, 8]."""
return 0.5 * tf.einsum(
'ijB,BIJ->ijIJ',
tf.einsum('AB,Aij->ijB', t_ab, self._tc_28_8_8),
self._tc_28_8_8)
def _canonicalize_equilibrium_sc(self, v70, diagonalize_8x8s=True,
rng=None, verbose=True):
"""Simplifies a location on the scalar manifold by rotation."""
if rng is None:
rng = numpy.random.RandomState()
m8x8s = mu.nsum('Aij,A->ij', self.e7.su8.m_35_8_8.real, v70[:35])
m8x8c = mu.nsum('Aij,A->ij', self.e7.su8.m_35_8_8.real, v70[35:])
rot = self.e7.spin8.get_diagonalizing_rotation(
m8x8s if diagonalize_8x8s else m8x8c)
decomposed_rot = mu.product_decompose_rotation(rot)
resynthesized_rot = mu.resynthesize_rotation_for_rep(
8, 8, decomposed_rot, 'ab,->ab', numpy.ones([]))
if not numpy.allclose(rot, resynthesized_rot, rtol=1e-3, atol=1e-5):
raise ValueError(
'Resynthesized rotation does not match original rotation.')
generator_mapping_spec = 'sS,sScC->cC' if diagonalize_8x8s else 'cC,sScC->sS'
rep_action = 0.25 * self.e7.spin8.gamma_sscc
rot_other_rep = mu.resynthesize_rotation_for_rep(
8, 8, decomposed_rot, generator_mapping_spec, rep_action)
(rot_s, rot_c) = ((rot, rot_other_rep) if diagonalize_8x8s
else (rot_other_rep, rot))
canon_m8x8s = rot_s.T @ m8x8s @ rot_s
canon_m8x8c = rot_c.T @ m8x8c @ rot_c
if diagonalize_8x8s:
gens_postdiag = mu.get_generators_for_post_diagonalization_reduction(
numpy.diag(canon_m8x8s), 'gsS,sScC->gcC', self.e7.spin8.gamma_sscc)
else:
gens_postdiag = mu.get_generators_for_post_diagonalization_reduction(
numpy.diag(canon_m8x8c), 'gcC,sScC->gsS', self.e7.spin8.gamma_sscc)
tc_rot_gens = mu.tff64(gens_postdiag)
tc_8x8s = mu.tff64(canon_m8x8s)
tc_8x8c = mu.tff64(canon_m8x8c)
@tf.function
def tf_rotated_8x8(t_rot_params):
t_rot = mu.tf_expm(
tf.einsum('gab,g->ab', tc_rot_gens, t_rot_params))
if diagonalize_8x8s:
tc_rotated_8x8 = tf.linalg.matmul(
t_rot @ tc_8x8c, t_rot, transpose_b=True)
else:
tc_rotated_8x8 = tf.linalg.matmul(
t_rot @ tc_8x8s, t_rot, transpose_b=True)
return tc_rotated_8x8
@tf.function
def tf_loss(t_rot_params):
t_8x8 = tf_rotated_8x8(t_rot_params)
ret = tf.reduce_sum(tf.abs(t_8x8))
return ret
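    # Minimizing the summed absolute values (an L1 penalty) drives the rotated
    # 8x8 block toward a sparse, canonical form.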
if gens_postdiag.shape[0] == 0:
return self.e7.v70_from_35s35c(canon_m8x8s, canon_m8x8c)
_, opt_rot_params = mu.tf_minimize_v2(
tf_loss,
rng.normal(scale=1.0, size=gens_postdiag.shape[0]),
default_gtol=1e-14)
opt_8x8 = tf_rotated_8x8(mu.tff64(opt_rot_params)).numpy()
if diagonalize_8x8s:
return self.e7.v70_from_35s35c(canon_m8x8s, opt_8x8)
else:
return self.e7.v70_from_35s35c(opt_8x8, canon_m8x8c)
def canonicalize_equilibrium(self, v70, **kwargs):
"""Simplifies a location on the scalar manifold by rotation."""
v70 = | numpy.asarray(v70) | numpy.asarray |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 20 11:51:49 2021
@author: arslan
"""
from pyit2fls import (IT2FS_Gaussian_UncertStd, IT2FS_LGaussian_UncertStd,
IT2FS_RGaussian_UncertStd, IT2Mamdani, product_t_norm,
probabilistic_sum_s_norm, IT2FS_plot, crisp, )
from numpy import (random, linspace, array, zeros, shape, sort,
maximum, minimum, )
from scipy.optimize import (differential_evolution, minimize, basinhopping, )
from PyPSO import PyPSO
class Classifier:
def normalizeParameters(self, parameters, n=3):
p = zeros(shape=(3 * n + 2 + 3 ** n, ))
for i in range(n):
p[3 * i:3 * (i + 1)] = sort(parameters[3 * i:3 * (i + 1)])
p[3 * n:3 * n + 2] = maximum(0., minimum(1., sort(parameters[3 * n:3 * n + 2])))
p[3 * n + 2:] = parameters[3 * n + 2:] > 0
return p
def __init__(self, attributes, decisions, parameters, n=3):
self.attributes = attributes
self.decisions = decisions
        self.p = self.normalizeParameters(parameters, n=n)
self.idomain = linspace(-1.0, 1.0, 1001)
self.odomain = linspace( 0.0, 1.0, 1001)
self.att1_s1 = IT2FS_RGaussian_UncertStd(self.idomain, params=[self.p[0],
0.25, 0.05, 1.0])
self.att1_s2 = IT2FS_Gaussian_UncertStd(self.idomain, params=[self.p[1],
0.25, 0.05, 1.0])
self.att1_s3 = IT2FS_LGaussian_UncertStd(self.idomain, params=[self.p[2],
0.25, 0.05, 1.0])
self.ATT1_SETS = [self.att1_s1, self.att1_s2, self.att1_s3]
self.att2_s1 = IT2FS_RGaussian_UncertStd(self.idomain, params=[self.p[3],
0.25, 0.05, 1.0])
self.att2_s2 = IT2FS_Gaussian_UncertStd(self.idomain, params=[self.p[4],
0.25, 0.05, 1.0])
self.att2_s3 = IT2FS_LGaussian_UncertStd(self.idomain, params=[self.p[5],
0.25, 0.05, 1.0])
self.ATT2_SETS = [self.att2_s1, self.att2_s2, self.att2_s3]
self.att3_s1 = IT2FS_RGaussian_UncertStd(self.idomain, params=[self.p[6],
0.25, 0.05, 1.0])
self.att3_s2 = IT2FS_Gaussian_UncertStd(self.idomain, params=[self.p[7],
0.25, 0.05, 1.0])
self.att3_s3 = IT2FS_LGaussian_UncertStd(self.idomain, params=[self.p[8],
0.25, 0.05, 1.0])
self.ATT3_SETS = [self.att3_s1, self.att3_s2, self.att3_s3]
self.deci_s1 = IT2FS_RGaussian_UncertStd(self.odomain, params=[self.p[9],
0.25, 0.05, 1.0])
self.deci_s2 = IT2FS_LGaussian_UncertStd(self.odomain, params=[self.p[10],
0.25, 0.05, 1.0])
self.DECI_SETS = [self.deci_s1, self.deci_s2]
self.DM = IT2Mamdani(product_t_norm, probabilistic_sum_s_norm)
self.DM.add_input_variable("ATT1")
self.DM.add_input_variable("ATT2")
self.DM.add_input_variable("ATT3")
self.DM.add_output_variable("DECI")
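        # The trailing 3**n binarized parameters select, for each of the
        # 3 * 3 * 3 rule antecedent combinations, which of the two decision
        # sets fires as the consequent.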
for i in range(3):
for j in range(3):
for k in range(3):
self.DM.add_rule([("ATT1", self.ATT1_SETS[i]),
("ATT2", self.ATT2_SETS[j]),
("ATT3", self.ATT3_SETS[k])],
[("DECI", self.DECI_SETS[int(self.p[11 + i * 9 + j * 3 + k])])])
def __call__(self, att1, att2, att3):
o, tr = self.DM.evaluate({"ATT1": att1, "ATT2": att2, "ATT3": att3})
return crisp(tr["DECI"])
def error(self):
err = 0.
for attribute, decision in zip(self.attributes, self.decisions):
o = self.__call__(*attribute)
if o > 0.51 and decision != 1:
err += o - 0.51
elif o < 0.49 and decision != 0:
err += 0.49 - o
return err / len(self.decisions)
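    # Note: crisp outputs inside the band [0.49, 0.51] incur no penalty either
    # way; values below count as class 0 and values above as class 1.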
if __name__ == "__main__":
def parametersGenerator(n=3):
return 2 * (random.rand(3 * n + 2 + 3 ** n) - 0.5)
def velocityGenerator(n=3):
        return 4. * (random.rand(3 * n + 2 + 3 ** n) - 0.5)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # compute elapsed time up front so it is defined even when the
            # very first status check succeeds
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
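# Example request (hypothetical date range; the dataset name matches one built
# by M2M_URLs below):
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')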
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
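# Typical pipeline (sketch, assuming M2M_URLs below supplies the dataset name
# and variable list for the chosen platform/instrument/method):
#   data = M2M_Call(uframe_dataset_name, start_date, end_date)
#   files = M2M_Files(data, '.*\\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)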
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
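# Usage sketch: structtype grows on demand -- indexing one slot past the end
# appends a fresh var(), which is how M2M_URLs below builds up var_list:
#   var_list = structtype()
#   var_list[0].name = 'time'
#   var_list[0].units = 'seconds since 1900-01-01'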
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
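# Note on VELPT raw units: the telemetered stream reports heading/pitch/roll in
# deci-degrees, temperature in 0.01degC, and pressure in 0.001dbar. A hedged
# post-download conversion sketch (assumes var_list has been filled from the
# response; scale factors follow directly from the unit strings above):
#   heading_deg = var_list[4].data / 10.0     # deci-degrees -> degrees
#   temp_degC   = var_list[7].data / 100.0    # 0.01degC     -> degC
#   pressure_db = var_list[8].data / 1000.0   # 0.001dbar    -> dbar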
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
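# Every PCO2W branch above requests the same triple (time, thermistor
# temperature, seawater pCO2); only uframe_dataset_name changes. A
# hypothetical table-driven alternative (a sketch, not used by this script):
#   PCO2W_REFDES = {('CE01ISSM', 'NSIF'): 'CE01ISSM/RID16/05-PCO2WB000',
#                   ('CE01ISSM', 'MFN'):  'CE01ISSM/MFD35/05-PCO2WB000'}
#   uframe_dataset_name = (PCO2W_REFDES[(platform_name, node)]
#                          + '/telemetered/pco2w_abc_dcl_instrument')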
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
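# PHSEN pH ('phsen_abcdef_ph_seawater') is dimensionless, hence 'unitless';
# the thermistor temperature is requested alongside it because the pH value
# is temperature-dependent and is normally interpreted together with it.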
#SPKIR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
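# SPKIR caveat: 'spkir_abj_cspp_downwelling_vector' is multispectral, so
# var_list[1].data is expected to come back 2-D (time x wavelength channel)
# rather than 1-D like the scalar variables elsewhere in this ladder. A hedged
# slicing sketch for a single channel once the array is filled:
#   channel_0 = var_list[1].data[:, 0]   # first wavelength band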
#PRESF
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
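# PRESF reports absolute seafloor pressure in dbar; since 1 dbar corresponds
# to roughly 1 m of seawater, a quick-look depth estimate (a rough sketch that
# ignores the atmospheric offset and density variation) is:
#   approx_depth_m = var_list[1].data   # 1 dbar ~ 1 m of seawater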
#CTDBP
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
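# All CTDBP branches share the same six variables, so a filled var_list can be
# tabulated generically. A hedged sketch (assumes the var_list elements expose
# the .name/.data attributes used throughout this ladder):
#   import pandas as pd
#   df = pd.DataFrame({var_list[i].name: var_list[i].data for i in range(6)})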
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
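# VEL3D horizontal current speed can be derived from the eastward/northward
# components once the arrays are filled (a sketch; np is numpy as imported
# at the top of this script):
#   speed_m_s = np.sqrt(var_list[1].data**2 + var_list[2].data**2)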
#VEL3DK
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
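# The profiler (WFP01) branches above carry 'int_ctd_pressure' and
# 'ctdpf_ckl_seawater_pressure' directly in dbar, unlike the fixed-node VEL3D
# branches whose pressure arrives in 0.001dbar; profiler samples are usually
# binned by this pressure to build vertical sections.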
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
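# PCO2A delivers both the seawater and atmospheric partial pressures, so the
# air-sea pCO2 difference driving the reported flux can be checked directly
# (a sketch; positive values mean the ocean side has the higher pCO2):
#   delta_pco2_uatm = var_list[1].data - var_list[2].data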
#PARAD
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
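# Only the time base is requested for OPTAA: its absorption/attenuation
# spectra are multi-wavelength and large, and are presumably retrieved by a
# separate, dedicated request when needed.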
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
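# NUTNR quirk: the telemetered branches above point at streams named
# 'suna_dcl_recovered'; the 'recovered' suffix is part of the upstream stream
# name and does not indicate the recovered-host delivery method.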
#MOPAK
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
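# From here the ladder also covers method == 'RecoveredHost' (data recovered
# from the platform's logger rather than telemetered); the MOPAK accelerometer
# branches request only the time base, mirroring the OPTAA handling above.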
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
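# METBK wind components are magnetic-corrected averages; a scalar wind speed
# sketch once the arrays are filled (np is numpy as imported above):
#   wind_speed_m_s = np.sqrt(var_list[4].data**2 + var_list[5].data**2)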
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
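# Note on the FLORT branches above: in the standard FLORT processing the
# 'optical_backscatter' product (m-1) is computed from the
# 'total_volume_scattering_coefficient' measurement (m-1 sr-1), so the two
# fields are normally screened together. Sketch (NumPy only, the mask name
# is illustrative):
#
#   import numpy as np
#   bad_beta = np.isnan(var_list[4].data) | np.isnan(var_list[5].data)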
# FDCHP: Direct Covariance Flux (air-sea flux package; only the time coordinate is configured here)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# DOSTA: Dissolved Oxygen Stable Response (optode)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
# ADCP: Acoustic Doppler Current Profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
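# The ADCP attitude fields above (heading, pitch, roll) are reported in
# deci-degrees, i.e. tenths of a degree. Conversion sketch using the slots
# declared above (the derived variable names are illustrative):
#
#   heading_deg = var_list[2].data / 10.0
#   pitch_deg = var_list[3].data / 10.0
#   roll_deg = var_list[4].data / 10.0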
# WAVSS: Surface Wave Spectra (bulk statistics streams)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
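# In the WAVSS statistics streams above, 'significant_wave_height' is the
# time-domain (zero-crossing) estimate, while 'wave_height_hmo' is the
# spectral estimate Hm0 = 4 * sqrt(m0), with m0 the zeroth moment of the
# wave spectrum; the two agree closely for narrow-banded seas.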
# VELPT: Single Point Velocity Meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# alternate stream name for this node, kept commented out for reference:
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
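# The VELPT branches above keep the instrument's raw engineering units.
# Note that var_list[8] is named 'pressure_mbar' but its declared unit is
# '0.001dbar'; the conversions below trust the declared unit strings:
#
#   heading_deg = var_list[4].data / 10.0      # deci-degrees -> degrees
#   temp_degC = var_list[7].data / 100.0       # 0.01degC -> degC
#   pressure_dbar = var_list[8].data / 1000.0  # 0.001dbar -> dbar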
# PCO2W: Water pCO2
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
# PHSEN: Seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
# SPKIR: Spectral Irradiance (multispectral downwelling radiometer)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
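# Unlike the scalar fields elsewhere in this chain, the SPKIR downwelling
# vector above is two-dimensional (time x wavelength channel), so it needs
# an extra axis when subset or plotted, e.g. var_list[1].data[:, 0] for the
# first channel (sketch; the channel count depends on the radiometer).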
# PRESF: Seafloor Pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
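# The PRESF 'abs_seafloor_pressure' above is absolute pressure, so it
# includes the atmospheric contribution (~10.13 dbar at sea level). A
# common correction sketch before converting pressure to depth (the offset
# value is a nominal assumption, not part of this script):
#
#   gauge_dbar = var_list[1].data - 10.1325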
# CTDBP: Pumped CTD (conductivity-temperature-depth)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
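# VEL3D (3-D single point velocity meter) recovered-host streams: turbulent
# velocity components in m/s plus seawater pressure reported in 0.001 dbar.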
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
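# PCO2A (air-sea pCO2) recovered-host streams: partial pressure of CO2 in
# surface seawater and in the atmosphere (uatm), plus the derived CO2 flux.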
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
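# OPTAA (absorption spectrophotometer) recovered-host streams. Only the time
# coordinate is configured here; the multi-wavelength absorption and
# attenuation arrays are presumably handled separately.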
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
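# NUTNR (SUNA nitrate) recovered-host streams: raw and salinity-corrected
# nitrate concentration in umol/L.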
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
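#CTDBP
# CTDBP recovered-instrument streams: the same six variables as the
# recovered-host branches above, but read from the instrument's internal
# record, so the source parameters carry ctdbp_-prefixed names.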
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
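#ADCP
# ADCP recovered-instrument streams (earth-coordinate velocities): bin
# depths, attitude (heading/pitch/roll in deci-degrees), and the three
# seawater velocity components in m/s.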
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
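#ZPLSC
# ZPLSC (bio-acoustic sonar) recovered-instrument streams. Only the time
# coordinate is configured here; the echogram data itself is presumably
# retrieved separately.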
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
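#VELPT
# VELPT (single point velocity meter) recovered-instrument streams: velocity
# components in m/s plus attitude, temperature, and pressure in the
# instrument's native scaled units (deci-degrees, 0.01 degC, 0.001 dbar).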
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
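#VEL3D
# VEL3D recovered streams: the wire-following-profiler variant (vel3d_k)
# reports velocity components with attitude and CTD pressure in dbar, while
# the MFN variants (vel3d_c) report turbulent velocity components with
# pressure in 0.001 dbar.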
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
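#PRESF
# PRESF (seafloor pressure) recovered-instrument streams: tide pressure in
# dbar and tide temperature in degC.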
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
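#PHSEN (seawater pH) Data below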
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
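#PCO2W (seawater pCO2) Data below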
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
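#PARAD (photosynthetically available radiation) Data below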
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
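#NUTNR (SUNA nitrate) Data below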
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
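#FDCHP Data below (only the time variable is mapped for this stream)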
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
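#FLORT (chlorophyll/CDOM/optical backscatter) Data below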
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
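#DOSTA (dissolved oxygen) Data below; note that on the CE09OSPM profiler the DOSTA class is served by the DOFST-K instrument stream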
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
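#WAVSS_Stats Data below; on the CE01ISSM/CE06ISSM MFN these wave statistics come from the ADCPT-M log9 stream (no separate WAVSS stream is mapped here)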
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
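#Cabled Benthic Experiment Package (BEP) Streamed Data below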
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
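#note: BEP dissolved oxygen (DOSTA) is parsed from the same ctdbp_no_sample stream as the co-located CTD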
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface Piercing Profiler) Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
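# Recovered CSPP NUTNR (nitrate) datasets: salinity-corrected and raw nitrate concentration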
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
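# Recovered CSPP SPKIR (downwelling spectral irradiance) datasets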
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
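# Recovered CSPP VELPT (point velocity) datasets: east/north/up velocity plus heading, roll, pitch, temperature, pressure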
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
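# Recovered CSPP OPTAA datasets: only time and interpolated CTD pressure are requested here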
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
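# CE05MOAS glider CTD datasets (telemetered and recovered_host), one block per glider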
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
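# CE05MOAS glider DOSTA (dissolved oxygen) datasets, one block per glider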
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
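# CE05MOAS glider FLORT datasets: chlorophyll, CDOM, and backscatter from the flbbcd fluorometer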
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
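# --- PARAD (photosynthetically available radiation) glider datasets ---
# The branches below differ only in the glider number (GL386 ... GL247) and the
# collection method; the variable list (time, parad_m_par, int_ctd_pressure,
# lat, lon) and its units are identical in every case. A table-driven
# construction, e.g. formatting 'CE05MOAS/GL%s/01-PARADM000/%s/%s' from
# (glider, method, stream) tuples, could express the same mapping more
# compactly, but the explicit branches are kept to match the rest of the file.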
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
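# --- ADCP (glider-mounted velocity profiler) datasets ---
# ADCP data from the CE05MOAS gliders appear only as RecoveredHost branches
# here; each branch records bin depths, attitude (heading/pitch/roll, reported
# in deci-degrees), and the eastward/northward/upward seawater velocities.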
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
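# --- METBK1-hr (hourly bulk meteorology flux products, metbk_hourly) ---
# The branches below cover the surface moorings and carry the derived air-sea
# flux products (heat, momentum, freshwater, radiation) plus near-surface air
# properties; note that this stream uses 'met_timeflx' rather than 'time' as
# its time variable.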
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
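# WAVSS_MeanDir: mean directional wave spectra from the surface wave sensor
# (per-band mean direction, directional spread, and directional PSD).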
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
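# WAVSS_NonDir: non-directional wave spectra (PSD versus frequency only).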
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
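# WAVSS_Motion: buoy motion time series (heave/north/east offset arrays plus
# magnetically corrected x/y buoy motion).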
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
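# WAVSS_Fourier: Fourier coefficients of the directional wave spectrum
# (2-D coefficient array with its frequency-band bookkeeping).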
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
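# Cabled profiler branches: CE04OSPS is the Oregon Offshore cabled shallow
# profiler (streamed) and CE04OSPD the companion deep profiler
# (recovered_inst / recovered_wfp methods).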
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
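# Note: for the streamed shallow-profiler DOSTA, dissolved oxygen is served
# through the co-located CTD stream (ctdpf_sbe43_sample), so the CTD dataset
# path is reused here.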
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
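# FLORT/FLNTU fluorometer branches: chlorophyll-a, CDOM, and optical
# backscatter, with interpolated CTD pressure for depth context.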
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
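# Remaining shallow-profiler chemistry/optics sensors (PHSEN pH, PARAD PAR,
# SPKIR downwelling irradiance, NUTNR nitrate, PCO2W pCO2), each paired with
# int_ctd_pressure.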
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
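# Velocity sensors: VELPT point velocity on the streamed shallow profiler and
# VEL3D on the deep profiler (recovered_inst / recovered_wfp).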
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
# The DOSTA oxygen data are served through the co-located CTD stream
# (4A-CTDPFA109) rather than the dedicated DOSTA stream commented out below.
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure'  # also valid for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
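# The METBK branches that follow share a single variable layout (bulk surface
# meteorology plus minute-averaged flux estimates); only the platform, the
# reference designator (SBD11 for METBK1, SBD12 for METBK2), and the delivery
# method differ from branch to branch.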
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
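# Five WAVSS product families are mapped below for CP01CNSM, each in both
# telemetered and recovered_host form: bulk statistics (WAVSS_Stats), mean
# directional spectra (WAVSS_MeanDir), non-directional spectra (WAVSS_NonDir),
# buoy motion (WAVSS_Motion), and Fourier coefficients (WAVSS_Fourier).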
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (Telemetered)
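# Each PCO2A branch maps the seawater and atmospheric CO2 partial pressures
# together with the derived air-sea flux (pco2_co2flux).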
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (RecoveredHost)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
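# FDCHP (direct-covariance flux package) streams: only the time coordinate is
# mapped below; no science variables are populated for these branches here.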
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
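#METBK hourly (metbk_hourly) bulk-flux products
# Hypothetical walk-through of how this chain resolves: a call with
# platform_name='CP01CNSM', node='BUOY', instrument_class='METBK1-hr', and
# method='Telemetered' matches the first branch below, which selects the
# 'metbk_hourly' stream and pre-allocates empty arrays, presumably filled
# later from the query response.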
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
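# OPTAA (spectral absorption and attenuation) branches: only the time record
# is pre-allocated in this chain, presumably because the multi-wavelength
# optical channels are retrieved separately (assumption; not shown here).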
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
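# VELPT (single-point velocity meter) on the NSIF (RID26/04-VELPTA000): nine
# variables covering east/north/up velocities (m/s), attitude in deci-degrees,
# and raw engineering temperature/pressure counts; the method argument selects
# the recovered_inst, telemetered, or recovered_host stream variant.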
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
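# FLORT (three-channel fluorometer) on the NSIF: chlorophyll-a, CDOM, and
# optical backscatter / volume scattering products; telemetered and
# recovered_host delivery only.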
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
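# SPKIR (multispectral downwelling irradiance) on the NSIF: the downwelling
# spectrum is carried as a single vector variable in uW cm-2 nm-1.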
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
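# DOSTA (dissolved-oxygen optode) on the NSIF: the corrected dissolved_oxygen
# product (umol/kg) alongside the optode's raw concentration estimate, optode
# temperature, and the temperature-corrected oxygen variable.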
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
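# PHSEN (seawater pH) branches: thermistor temperature (degC) and computed pH
# (unitless) for both the NSIF (RID26) and MFN (MFD35) sensors, across all
# three delivery methods.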
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
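# PCO2W (seawater pCO2) on the MFN (MFD35/05-PCO2WB000): thermistor
# temperature and pco2_seawater in uatm.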
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
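# PRESF (seafloor pressure) on the MFN: DCL streams expose
# abs_seafloor_pressure / seawater_temperature, while recovered_inst streams
# use the presf_tide_* names; note CP04OSSM carries series PRESFC000 rather
# than the PRESFB000 on the other two moorings.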
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
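# VELPT on the MFN (MFD35/04-VELPTA000): same nine-variable layout as the
# NSIF VELPT branches above.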
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
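#MFN DOSTA (oxygen optode) streams: dissolved oxygen in umol/kg and umol/L,
#plus optode temperature.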
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
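#MFN ZPLSC (bioacoustic sonar) streams: only the time coordinate is requested
#here; the acoustic/echogram payload is not mapped into var_list.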
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
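#MFN ADCP streams (earth coordinates): bin depths, heading/pitch/roll, and
#east/north/up seawater velocities.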
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
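#The WFP moorings (CP04OSPM, CP01CNPM, CP02PMCI, CP02PMCO, CP02PMUI) repeat
#the same instrument pattern: a buoy-mounted MOPAK accelerometer plus
#profiler-mounted FLORT, DOSTA (served by the DOFST-K oxygen stream on these
#profilers), VEL3D, CTD, and PARAD, each in Telemetered and Recovered
#(host or WFP) variants. Only the platform prefix of uframe_dataset_name
#changes between moorings; variable names and units are identical. A
#table-driven refactor could collapse these branches; a minimal sketch
#(hypothetical helper, not part of this script):
#
#    WFP_FLORT_NAMES = ['time', 'seawater_scattering_coefficient',
#                       'fluorometric_chlorophyll_a', 'fluorometric_cdom',
#                       'total_volume_scattering_coefficient',
#                       'optical_backscatter', 'int_ctd_pressure']
#    WFP_FLORT_UNITS = ['seconds since 1900-01-01', 'm-1', 'ug/L', 'ppb',
#                       'm-1 sr-1', 'm-1', 'dbar']
#
#    def fill_var_list(var_list, names, units):
#        #Assign name, empty data array, and units for each variable slot.
#        for i, (n, u) in enumerate(zip(names, units)):
#            var_list[i].name = n
#            var_list[i].data = np.array([])
#            var_list[i].units = u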
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
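#CP01CNPM profiler mooring: same WFP instrument pattern as CP04OSPM above.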
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
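#CP02PMCI profiler mooring: same WFP instrument pattern as above.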
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
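#CP02PMCO profiler mooring: same WFP instrument pattern as above.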
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
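#CP02PMUI profiler mooring: same WFP instrument pattern as above.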
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
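# Riser-mounted ADCP entries follow. The recovered_inst 'adcp_velocity_earth'
# streams report attitude in deci-degrees, while the adcps_jln host and
# telemetered streams report it in centidegrees ('cdegree').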
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
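# Mobile assets: the CPGL* glider entries below map to CP05MOAS dataset paths
# and additionally expose 'lat'/'lon' variables for georeferencing.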
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
        var_list[1].data = np.array([])
import enum
import numpy as np
import numpy.typing as npt
import pyoorb
import requests as req
from astropy.time import Time
from os import getenv
from typing import (
Iterable,
Optional,
Tuple,
List
)
from .spherical_geom import propagate_linearly
pyoorb_initialized = False
DEGREE = 1.0
ARCMIN = DEGREE / 60
ARCSEC = ARCMIN / 60
def _ensure_pyoorb_initialized(*args, **kwargs):
"""Make sure that pyoorb is initialized."""
global pyoorb_initialized
if not pyoorb_initialized:
pyoorb.pyoorb.oorb_init(*args, **kwargs)
pyoorb_initialized = True
class OrbitElementType(enum.Enum):
CARTESIAN = 1
COMETARY = 2
KEPLERIAN = 3
class EpochTimescale(enum.Enum):
UTC = 1
UT1 = 2
TT = 3
TAI = 4
class PropagationIntegrator(enum.Enum):
N_BODY = 1
TWO_BODY = 2
class Orbit:
def __init__(self, orbit_id: int, state_vector: npt.NDArray[np.float64]):
"""
Create a new Orbit.
state_vector is a pretty opaque blob. It should be the structure that
pyoorb expects - a 12-element vector of doubles.
"""
self.orbit_id = orbit_id
self._state_vector = state_vector
self._orbit_type = OrbitElementType(int(state_vector[0][7]))
self._epoch_timescale = EpochTimescale(int(state_vector[0][9]))
self._epoch = state_vector[0][8]
@classmethod
def cometary(
cls,
orbit_id: int,
perihelion_au: float,
eccentricity: float,
inclination_deg: float,
ascending_node_longitude_deg: float,
periapsis_argument_deg: float,
perihelion_epoch_mjd: float,
osculating_element_epoch_mjd: float,
epoch_timescale: EpochTimescale,
abs_magnitude: float,
photometric_slope_parameter: float,
):
# Orbits class takes in degrees, but state vectors are given in radians
state_vector = np.array(
[
[
orbit_id,
perihelion_au,
eccentricity,
np.deg2rad(inclination_deg),
np.deg2rad(ascending_node_longitude_deg),
np.deg2rad(periapsis_argument_deg),
perihelion_epoch_mjd,
OrbitElementType.COMETARY.value,
osculating_element_epoch_mjd,
epoch_timescale.value,
abs_magnitude,
photometric_slope_parameter,
]
],
dtype=np.double,
order="F",
)
return cls(orbit_id, state_vector)
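    # Illustrative usage (all numeric values below are hypothetical, not taken
    # from a real object); angles are passed in degrees and converted to
    # radians by this classmethod:
    #   orb = Orbit.cometary(
    #       orbit_id=1, perihelion_au=1.2, eccentricity=0.5,
    #       inclination_deg=10.0, ascending_node_longitude_deg=40.0,
    #       periapsis_argument_deg=30.0, perihelion_epoch_mjd=59000.0,
    #       osculating_element_epoch_mjd=59000.0,
    #       epoch_timescale=EpochTimescale.TT,
    #       abs_magnitude=18.0, photometric_slope_parameter=0.15)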
@classmethod
def keplerian(
cls,
orbit_id: int,
semimajor_axis_au: float,
eccentricity: float,
inclination_deg: float,
ascending_node_longitude_deg: float,
periapsis_argument_deg: float,
mean_anomaly_deg: float,
osculating_element_epoch_mjd: float,
epoch_timescale: EpochTimescale,
abs_magnitude: float,
photometric_slope_parameter: float,
):
# Orbits class takes in degrees, but state vectors are given in radians
state_vector = np.array(
[
[
orbit_id,
semimajor_axis_au,
eccentricity,
np.deg2rad(inclination_deg),
np.deg2rad(ascending_node_longitude_deg),
np.deg2rad(periapsis_argument_deg),
                    np.deg2rad(mean_anomaly_deg),
# Copyright (C) 2021-2022 by the FEMlium authors
#
# This file is part of FEMlium.
#
# SPDX-License-Identifier: MIT
import numpy as np
import geojson
import folium
from femlium.base_plotter import BasePlotter
from femlium.utils import ColorbarWrapper
class BaseMeshPlotter(BasePlotter):
"""
This class contains the base interface of a geographic plotter for mesh-related plots.
"""
def add_mesh_to(self, geo_map, vertices, cells,
cell_markers=None, face_markers=None,
cell_colors=None, face_colors=None, face_weights=None):
"""
Add a triangular mesh to a folium map.
Parameters
----------
geo_map : folium.Map
Map to which the mesh plot should be added.
vertices: 2d numpy array
Matrix containing the coordinates of the vertices.
The matrix should have as many rows as vertices in the mesh, and two columns.
cells: 2d numpy array
Matrix containing the connectivity of the cells.
The matrix should have as many rows as cells in the mesh, and three columns.
cell_markers: 1d numpy array, optional
Vector containing a marker (i.e., an integer number) for each cell.
The vector should have as many entries as cells in the mesh.
If not provided, the marker will be set to 0 everywhere.
face_markers: 2d numpy array, optional
Vector containing a marker (i.e., an integer number) for each face.
The matrix should have the same shape of the cells argument.
Given a row index r, the entry face_markers[r, 0] is the marker of the
face connecting the first and second vertex of the r-th cell.
Similarly, face_markers[r, 1] is the marker associated to the face connecting
the second and third vertex of the r-th cell. Finally, face_markers[r, 2] is
the marker associated to the face connecting the first and third vertex of the
r-th cell.
If not provided, the marker will be set to 0 everywhere.
cell_colors: str or dict of str, optional
If a dictionary is provided, it should contain key: value pairs defining the mapping
marker: color for cells.
If a string is provided instead of a dictionary, the same color will be used for all
cell markers.
If not provided, the cells will not be colored.
face_colors: str or dict of str, optional
If a dictionary is provided, it should contain key: value pairs defining the mapping
marker: color for faces.
If a string is provided instead of a dictionary, the same color will be used for all
face markers.
If not provided, a default black color will be used for faces.
face_weights: int or dict of int, optional
Line weight of each face. Input should be provided following a similar convention for
the face_colors argument.
If not provided, a unit weight will be used.
"""
if cell_markers is None:
cell_markers = np.zeros((cells.shape[0], ), dtype=np.dtype(int))
else:
assert cell_markers.shape[0] == cells.shape[0]
if face_markers is None:
            face_markers = np.zeros(cells.shape, dtype=np.dtype(int))
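            # Usage sketch (hypothetical data): from a concrete plotter
            # subclass, a single unit triangle could be added to a folium map:
            #   vertices = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
            #   cells = np.array([[0, 1, 2]])
            #   plotter.add_mesh_to(geo_map, vertices, cells, face_colors="black")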
# coding: utf-8
# In[20]:
import numpy as np
import pydensecrf.densecrf as dcrf
import os
import cv2
import random
from tqdm import tqdm
# In[21]:
from skimage.color import gray2rgb
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, accuracy_score
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
#from osgeo import gdal
get_ipython().run_line_magic('matplotlib', 'inline')
# In[22]:
# Color maps for direction map
COLOR_LR = [0,128,128]
COLOR_UD = [128,0,128]
COLOR_DIAG = [255,215,0]
COLOR_ADIAG = [1,255,255]
INF = 10000
# In[23]:
MAX = 0
SUM = 1
VEC = 0
MAT = 1
# In[24]:
def dir_to_features(dir_map):
"""Converts direction color map to feature used for crf kernel. The
feature is obtained by computing the intersections of the x, y axis and the
line determined by the position of one point and its direction. (More details in
the report)
Parameters
____________
dir_map: numpy.array
Direction map that maps each pixel to a direction in
[left_right, up_down, diagonal, anti-diagonal], each direction
is represented by a color.
"""
(h, w, c) = dir_map.shape
feature_map = np.zeros((h,w,2))
for i in range(h):
for j in range(w):
dir_color = dir_map[i,j]
            # the four direction colours are distinguishable by their first channel
            if dir_color[0] == COLOR_LR[0]: # dir = lr
                feature_map[i,j] = np.array([INF,i])
            if dir_color[0] == COLOR_UD[0]: # dir = ud
                feature_map[i,j] = np.array([j,INF])
            if dir_color[0] == COLOR_DIAG[0]: # dir = diag
                feature_map[i,j] = np.array([j-i,i-j])
            if dir_color[0] == COLOR_ADIAG[0]: # dir = adiag
                feature_map[i,j] = np.array([i+j, i+j])
return feature_map
# In[25]:
def gen_dir_map(img):
"""Generate direction map from a rgb img
Parameters
____________
img: numpy.array
Rgb img with width = height
"""
window_size = 101
half_size = int((window_size-1)/2)
sigma_1 = 2
sigma_2 = 40
(h, w, c) = img.shape
assert h==w, "h and w are not equal"
dir_map = np.zeros((h,w))
pos_mat = np.zeros((h,w,2))
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
index_mask_lr = np.zeros((window_size, window_size)).astype("bool")
index_mask_lr[half_size,:]=True
index_mask_ud = np.zeros((window_size, window_size)).astype("bool")
index_mask_ud[:,half_size]=True
index_mask_diag = np.identity(window_size).astype("bool")
index_mask_adiag = np.fliplr(np.identity(window_size)).astype("bool")
mask_list = [index_mask_lr, index_mask_ud, index_mask_diag, index_mask_adiag]
for i in range(h):
for j in range(w):
img_nbr = padded_img[i:i+window_size,j:j+window_size]
pos_nbr = padded_pos[i:i+window_size,j:j+window_size]
img_nbr = img_nbr - img[i,j,:]
pos_nbr = pos_nbr - np.array([i,j])
dir_intensity = np.zeros(4)
for dir_index, index_mask in enumerate(mask_list):
img_nbr_dir = img_nbr[index_mask]
pos_nbr_dir = pos_nbr[index_mask]
img_nbr_dir = np.sum(img_nbr_dir**2, axis=1)/(2*sigma_1**2)
pos_nbr_dir = np.sum(pos_nbr_dir**2, axis=1)/(2*sigma_2**2)
k = np.exp(-img_nbr_dir-pos_nbr_dir)
dir_intensity[dir_index]=np.sum(k)
dir_map[i,j]=np.argmax(dir_intensity)+1
return dir_map
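# Usage sketch (the image path is illustrative): direction maps are computed on
# a Gaussian-blurred RGB image, e.g.
#   img = cv2.GaussianBlur(cv2.imread('./images/satImage_001.png'), (5, 5), 0)
#   dir_map = gen_dir_map(img)  # entries in {1: LR, 2: UD, 3: diag, 4: anti-diag}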
# In[26]:
def visualize_dir_map(img, dir_map, save_file=False,
filename=None, vis_path=None, dir_path=None):
"""Visualize a direction map
Parameters
____________
img: numpy.array
Rgb img
dir_map: numpy.array
Correspongding direction map
...
"""
h = img.shape[0]
w = img.shape[1]
vis_dir = np.zeros(img.shape)
vis_dir[dir_map==1] = np.array(COLOR_LR)
vis_dir[dir_map==2] = np.array(COLOR_UD)
vis_dir[dir_map==3] = np.array(COLOR_DIAG)
vis_dir[dir_map==4] = np.array(COLOR_ADIAG)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1); plt.imshow(img); plt.title('Original Image (blurred)'); plt.axis('off');
plt.subplot(1,2,2); plt.imshow(dir_map); plt.title('Direction map'); plt.axis('off');
if save_file:
plt.savefig(os.path.join(vis_path, filename),dpi=300)
plt.close()
cv2.imwrite(os.path.join(dir_path, filename), vis_dir)
# In[27]:
def gen_dir_map_and_visualize(image_path= './images/',
vis_path='./vis_dir_blur_/',
dir_path='./dir_map_/',
process_all=True):
"""Generate direction color map for images in image_path
Parameters
____________
image_path: string
Image path
vis_path: string
Path to save visualization results
dir_path: string
Path to save direction map
process_all: Bool
False to generate a single visualization result without save. True to
generate and save visualizaiton results for all images.
"""
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if not os.path.exists(vis_path):
os.mkdir(vis_path)
if process_all:
for file in tqdm(os.listdir(image_path)):
img = cv2.imread(os.path.join(image_path, file))
img = cv2.GaussianBlur(img,(5,5),0)
dir_map = gen_dir_map(img)
visualize_dir_map(img, dir_map, filename=file, save_file=True,
vis_path=vis_path, dir_path=dir_path)
else:
img = cv2.imread('./images/satImage_001.png')
img = cv2.GaussianBlur(img,(5,5),0)
dir_map = gen_dir_map(img)
visualize_dir_map(img, dir_map, save_file=False)
# In[28]:
def crf_with_dir_kernel(original_img, dir_feature, prob,
iter_num, compat_smooth, compat_appearance, compat_struct,
w_smooth, w_appearance, w_struct,
sigma_smooth, sigma_app_color, sigma_app_pos,
sigma_struct_pos, sigma_struct_feat):
"""CRF with a Gaussian smoothing kernel, an appearance kernel and a structural kernel
"""
(h,w) = prob.shape
y = np.zeros((h,w,2))
y[:,:,1] = prob
y[:,:,0] = 1-y[:,:,1]
annotated_image=y.transpose((2, 0, 1))
#Gives no of class labels in the annotated image
n_labels = 2
#Setting up the CRF model
d = dcrf.DenseCRF2D(original_img.shape[1], original_img.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_softmax(annotated_image)
unary = np.ascontiguousarray(U)
d.setUnaryEnergy(unary)
compat_smooth = compat_smooth * w_smooth
compat_appearance = compat_appearance * w_appearance
compat_struct = compat_struct * w_struct
# Smooth kernel
d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32),
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# Appearance kernel
d.addPairwiseBilateral(sxy=(sigma_app_pos, sigma_app_pos),
srgb=(sigma_app_color, sigma_app_color, sigma_app_color),
                           rgbim=original_img,
compat=compat_appearance.astype(np.float32),
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# Structural kernel
pairwise_energy = create_pairwise_bilateral(sdims=(sigma_struct_pos,sigma_struct_pos),
schan=(sigma_struct_feat,sigma_struct_feat),
img=dir_feature, chdim=2)
d.addPairwiseEnergy(pairwise_energy, compat=compat_struct.astype(np.float32))
Q = d.inference(iter_num)
proba = np.array(Q)
return proba[1].reshape((dir_feature.shape[0], dir_feature.shape[1]))
# In[29]:
def crf(original_image, prob,
iter_num=4, compat_smooth = np.array([[-0.4946432, 1.27117338],[0.59452892, 0.23182234]]),
compat_appearance = np.array([[-0.30571318, 0.83015124],[1.3217825, -0.13046645]]),
w_smooth=3.7946478055761963, w_appearance=1.8458537690881878,
sigma_smooth=8.575103751642672, sigma_color=2.0738539891571977, sigma_color_pos=20):
"""Basic CRF with a Gaussian smoothing kernel and an appearance kernel
"""
(h,w) = prob.shape
y = np.zeros((h,w,2))
y[:,:,1] = prob
y[:,:,0] = 1-y[:,:,1]
annotated_image=y.transpose((2, 0, 1))
#Gives no of class labels in the annotated image
n_labels = 2
#print("No of labels in the Image are ")
#print(n_labels)
#Setting up the CRF model
d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_softmax(annotated_image)
unary = np.ascontiguousarray(U)
d.setUnaryEnergy(unary)
compat_smooth=compat_smooth*w_smooth
compat_appearance=compat_appearance*w_appearance
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32), kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This adds the color-dependent term, i.e. features are (x,y,r,g,b).
d.addPairwiseBilateral(sxy=(sigma_color_pos, sigma_color_pos), srgb=(sigma_color, sigma_color, sigma_color), rgbim=original_image,
compat=compat_appearance.astype(np.float32),
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
Q = d.inference(iter_num)
proba = np.array(Q)
return proba[1].reshape((original_image.shape[0], original_image.shape[1]))
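# Usage sketch (hypothetical inputs): `prob` is a per-pixel road probability in
# [0, 1] with the same height/width as `original_image`, e.g.
#   refined = crf(original_image, prob, iter_num=4)
# which returns the refined probability map after dense-CRF inference.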
# In[30]:
def crf_smooth(original_image, prob, use_2d = True, iter_num=1, w=4.921522279119057, sigma_sm=4.325251720130304):
"""CRF with only a smoothing kernel
"""
(h,w) = prob.shape
y = np.zeros((h,w,2))
y[:,:,1] = prob
y[:,:,0] = 1-y[:,:,1]
annotated_image=y.transpose((2, 0, 1))
#Gives no of class labels in the annotated image
n_labels = 2
#Setting up the CRF model
if use_2d :
d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_softmax(annotated_image)
unary = np.ascontiguousarray(U)
d.setUnaryEnergy(unary)
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(sigma_sm, sigma_sm), compat=w, kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
Q = d.inference(iter_num)
proba = np.array(Q)
return proba[1].reshape((original_image.shape[0], original_image.shape[1]))
# In[31]:
def propagate_max_mat(img, prob):
"""Probability propagation (max) in 4 directions via matrix multiplication
"""
prob_out = prob.copy()
prop_size = 51
half_size = int((prop_size-1)/2)
prop_num = 3
sigma_1 = 5
sigma_2 = 42
(h, w) = prob.shape
pos_mat = np.zeros((h,w,2))
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
index_mask = np.zeros((prop_size, prop_size)).astype("bool")
for i in range(prop_size):
index_mask[i,half_size]=1
index_mask[half_size,i]=1
index_mask[i,i]=1
index_mask[prop_size-1-i,i]=1
for iteration in range(prop_num):
padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size)))
# propagate prob (maximum)
for i in range(h):
for j in range(w):
if prob_out[i,j]<0.01:
continue
img_nbr = padded_img[i:i+prop_size,j:j+prop_size]
pos_nbr = padded_pos[i:i+prop_size,j:j+prop_size]
img_nbr = img_nbr - img[i,j,:]
pos_nbr = pos_nbr - np.array([i,j])
img_nbr[~index_mask]=0
pos_nbr[~index_mask]=0
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)*prob_out[i,j]
k = k*index_mask
padded_prob[i:i+prop_size,j:j+prop_size] = np.maximum(padded_prob[i:i+prop_size,j:j+prop_size], k)
prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
return prob_out
# In[32]:
def propagate_max_vec(img, prob, prop_size=11,
prop_num=16, sigma_1=1.039316347691348, sigma_2=40):
"""
vec means only do propagation along x and y axis
max means propagate using max function
Args:
prop_size: neighborhood size
prop_num: number of iteration/propagation
sigma_1: variance of color
sigma_2: variance of distance
"""
prob_out = prob.copy()
half_size = int((prop_size-1)/2)
(h, w, c) = img.shape
pos_mat = np.zeros((h,w,2)) # position matrix
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
for iteration in range(prop_num):
padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size)))
padded_prob_fix = padded_prob.copy()
# propagate prob (maximum)
assert h==w, "h and w are not equal"
for i in range(h):
# prop along y for row i
img_nbr = padded_img[i:i+prop_size,:]
pos_nbr = padded_pos[i:i+prop_size,:]
img_nbr = img_nbr - padded_img[i+half_size,:,:]
pos_nbr = pos_nbr - padded_pos[i+half_size,:,:]
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)*padded_prob_fix[i+half_size,:]
padded_prob[i:i+prop_size,:] = np.maximum(padded_prob[i:i+prop_size,:], k)
# prop along x for col i
img_nbr = padded_img[:,i:i+prop_size]
pos_nbr = padded_pos[:,i:i+prop_size]
img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c))
pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2))
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)*padded_prob_fix[:,i+half_size].reshape((-1,1))
padded_prob[:,i:i+prop_size] = np.maximum(padded_prob[:,i:i+prop_size], k)
prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
return prob_out
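# Usage sketch (hypothetical inputs): each pass dilates high-probability pixels
# along the x/y axes, weighted by a joint colour/position Gaussian kernel, e.g.
#   prob_prop = propagate_max_vec(img, prob, prop_size=11, prop_num=16)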
# In[33]:
def propagate_sum_vec(img, prob, prop_size=11, prop_num=1, sigma_1=1.5319569104856783, sigma_2=80):
"""
vec means only do propagation along x and y axis
sum means propagate in a additive schema (with total probability fixed)
Args:
prop_size: neighborhood size
prop_num: number of iteration/propagation
sigma_1: variance of color
sigma_2: variance of distance
"""
# print(np.sum(prob))
prob_out = prob.copy()
half_size = int((prop_size-1)/2)
(h, w, c) = img.shape
pos_mat = np.zeros((h,w,2)) # position matrix
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
    padded_prob = np.pad(prob, ((half_size, half_size), (half_size, half_size)))
#!/usr/bin/env python
# coding: utf-8
""""
Usage: python show_data.py
"""
# In[1]:
import numpy as np
from scipy import spatial
import math
import sys
import os
# ## I. Custom functions
# ### 1. Get model information
# In[2]:
def get_edges(faces):
"""
    Derive the edges from the faces.
    @faces: all faces of the model
    return: edges of the model
"""
edge2key = dict()
edges = []
edges_count = 0
for face_id, face in enumerate(faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
if edge not in edge2key:
edge2key[edge] = edges_count
edges_count += 1
edges.append(list(edge))
return edges
def parse_obje(obj_file):
"""
    Parse an obj file to get vertices, edges, and faces.
    @obj_file: path to the obj model file
    return: vertices, faces, and edges of the model
"""
vs = []
faces = []
edges = []
with open(obj_file) as f:
for line in f:
line = line.strip()
splitted_line = line.split()
if not splitted_line:
continue
elif splitted_line[0] == 'v':
vs.append([float(v) for v in splitted_line[1:]])
elif splitted_line[0] == 'f':
try:
faces.append([int(c) - 1 for c in splitted_line[1:]])
except ValueError:
faces.append([int(c.split('/')[0]) - 1 for c in splitted_line[1:]])
elif splitted_line[0] == 'e':
if len(splitted_line) >= 4:
edge_v = [int(c) - 1 for c in splitted_line[1:-1]]
edge_c = int(splitted_line[-1])
edge_v.append(edge_c) # class
edges.append(edge_v)
else:
continue
vs = np.array(vs)
faces = np.array(faces, dtype=int)
# if len(edges) == 0:
# edges = get_edges(faces)
edges = np.array(edges)
return vs, faces, edges
# ### 2. Label faces using the edge labels
# In[3]:
def label_face_by_edge(faces, edges, edge_labels):
"""
    Label the faces using the edge labels.
    @faces: faces of the model
    @edges: edges of the model
    @edge_labels: labels of the model edges
    return: labels of the faces
"""
edge_dict = {} # key: str([pt1, pt2]) value: label
for ei, edge in enumerate(edges):
key = tuple(edge)
edge_dict[key] = edge_labels[ei]
# print(edge_dict)
face_labels = np.array(len(faces) * [[-1, -1, -1]])
for i, face in enumerate(faces):
for j in range(3):
cur_edge = [face[j], face[(j + 1) % 3]]
cur_label = edge_dict[tuple(sorted(cur_edge))]
face_labels[i][j] = cur_label
# face_labels.append(faces_edges)
face_labels = np.where(np.sum(face_labels, axis=1) < 2, 1, 2)
    optimizer_face_labels(faces, face_labels)  # refine the face labels: dilation-style smoothing and filling
return face_labels
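# Note on the labelling rule above: with 0/1 edge labels, a face is assigned
# label 2 when at least two of its three edge labels are 1 (row sum >= 2) and
# label 1 otherwise, before the neighbour majority-vote smoothing.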
def find_neighb_faces(face_id, faces):
face = faces[face_id]
nb_face = []
for i in range(3):
cur_edge = [face[i], face[(i + 1) % 3]]
pt1 = cur_edge[0]
pt2 = cur_edge[1]
face_ids = find_faces_by_2point(faces, pt1, pt2)
if len(face_ids) == 2:
nb_face_id = face_ids[0][0] if face_ids[0][0] != face_id else face_ids[1][0]
nb_face.append(nb_face_id)
return nb_face
def optimizer_face_labels(faces, face_labels):
# new_face_labels = face_labels.copy()
for i, face in enumerate(faces):
nb_faces = find_neighb_faces(i, faces)
nb_labels = []
for face_id in nb_faces:
nb_labels.append(face_labels[face_id])
if len(nb_labels) == 0:
continue
counts = np.bincount(nb_labels)
        # take the mode (most common neighbour label)
if face_labels[i] != np.argmax(counts):
# print("face: {}, label:{} nb_labels: {}, 众数: {}".format(i, face_labels[i], nb_labels, np.argmax(counts)))
face_labels[i] = np.argmax(counts)
# ### 3. Label vertices using the edges
# In[4]:
def label_pts_by_edges(vs, edges, edge_labels):
"""
    Label the vertices according to the edge labels.
    @vs: vertices of the model
    @edge: edges of the model
    @edge_labels: labels of the model edges
    return: labels of the vertices
"""
pts_labels = np.array(len(vs) * [[-1, -1]])
for ei, edge in enumerate(edges):
edge_label = edge_labels[ei]
pt1 = edge[0]
pt2 = edge[1]
pts_labels[pt1][edge_label] = edge_label
pts_labels[pt2][edge_label] = edge_label
return pts_labels
# In[5]:
def find_faces_by_2point(faces, id1, id2):
"""
    Given two vertices, find the two faces that share the edge defined by them.
    @faces: all faces, N*3, values are vertex ids
    @id1: id of the first vertex
    @id2: id of the second vertex
    return: 2*3, [face id, position of the first vertex, position of the second vertex]
"""
    p1_faces = np.argwhere(faces == id1)  # row id, column id
p2_faces = np.argwhere(faces == id2)
intersection_faces = []
for val1 in p1_faces:
for val2 in p2_faces:
if val1[0] == val2[0]:
intersection_faces.append([val1[0], val1[1], val2[1]])
return intersection_faces
# In[6]:
def get_pts_from_edges(edges, threshold=30):
circle_pts = [[]]
circle_edges = [[]]
count = 0
while len(edges) > 0:
if len(circle_pts[count]) == 0:
circle_pts[count] = list(edges[0])
            circle_edges[count].append(list(edges[0]))  # record the corresponding edge
edges = np.delete(edges, 0, axis=0)
else:
last_id = circle_pts[count][-1]
idx = np.where(edges == last_id)[0]
            # no connecting edge found
if len(idx) == 0:
circle_pts.append([])
circle_edges.append([])
count += 1
else:
edge = edges[idx[0]]
next_id = edge[0] if edge[0] != last_id else edge[1]
circle_pts[count].append(next_id)
circle_edges[count].append(list(edge))
edges = np.delete(edges, idx[0], axis=0)
pts_ids = []
for circle in circle_pts:
        # filter out short loops
if len(circle) > threshold:
# print("{}".format(len(circle)))
            circle = drop_cycle(circle, threshold)  # remove small closed loops
# print("after drop cycle {}".format(len(circle)))
pts_ids.append(circle)
return pts_ids
def get_pts_from_edges_vs(edges, vs, threshold=30):
# ---- split_edges ----
circle_pts = [[]]
circle_edges = [[]]
count = 0
while len(edges) > 0:
if len(circle_pts[count]) == 0:
circle_pts[count] = list(edges[0])
            circle_edges[count].append(edges[0])  # record the corresponding edge
edges = np.delete(edges, 0, axis=0)
else:
last_id = circle_pts[count][-1]
idx = np.where(edges == last_id)[0]
# 没有找到边
if len(idx) == 0:
circle_pts.append([])
circle_edges.append([])
count += 1
else:
edge = edges[idx[0]]
next_id = edge[0] if edge[0] != last_id else edge[1]
circle_pts[count].append(next_id)
circle_edges[count].append(edge)
edges = np.delete(edges, idx[0], axis=0)
    # ---- 2. Filter out loops that are too short (noise points) ----
filter_edges = []
filter_pts = []
for idx, circle in enumerate(circle_edges):
# print(len(circle))
if len(circle) < threshold:
continue
else:
filter_edges.append(circle)
circle_pt = drop_cycle(circle_pts[idx], threshold)
filter_pts.append(circle_pt)
# # save pts
# for idx, pts_id in enumerate(filter_pts):
# save_dir = "./test_circle"
# pts = vs[pts_id]
# np.savetxt(os.path.join(save_dir, "predict_class" + str(idx + 1) + ".pts"), pts)
# with open(os.path.join(save_dir, "predict_class" + str(idx + 1) + ".pts"), 'r+') as f:
# content = f.read()
# f.seek(0, 0)
# f.write('BEGIN\n' + content)
# with open(os.path.join(save_dir, "predict_class" + str(idx + 1) + ".pts"), 'a') as f:
# f.write('END\n')
return filter_pts
    # # ---- 3. Merge ----
    # # With one loop, return it directly; with two or more loops (three or more must be merged in x order), first collect the edges of each loop, as saved in the previous step.
    # # Then use a KDTree to find the closest pair of edges between the two sets and break/reconnect them (using the x, y values to avoid twisting), i.e. delete the two edges and construct two virtual edges.
# n_circle = len(filter_edges)
# if 0 == n_circle:
# return []
# elif 1 == n_circle:
# pts_ids = []
# for circle in filter_pts:
# circle = drop_cycle(circle, threshold) # 去闭环
# pts_ids.append(circle)
# return pts_ids
# else:
    # # TODO With three or more loops, sort by x range so parts enclosed by a larger loop can be filtered out (this is somewhat problematic, e.g. APDXA_VS_SET_VSc2_Subsetup4_Maxillar)
    # # Find the closest edges and break the loops
# last_edges = filter_edges[0]
# vs_edges = vs[last_edges].reshape(len(last_edges), -1)
# tree = spatial.KDTree(vs_edges)
# for i in range(1, n_circle):
# cur_edges = filter_edges[i]
# min_dist = np.inf
# min_index = -1
# cur_edge = cur_edges[0]
# cur_e_idx = 0
# for e_idx, e in enumerate(cur_edges):
# vs_e = np.append(vs[e[0]], vs[e[1]])
# dist, dist_idx = tree.query(vs_e)
# if dist < min_dist:
# min_dist = dist
# min_index = dist_idx
# cur_edge = e
# cur_e_idx = e_idx
#
    # # Iterate
    # # The closest edge in the previous loop
# last_edge = last_edges[min_index]
# last_edge_y1 = vs[last_edge[0]][1]
# last_edge_y2 = vs[last_edge[1]][1]
# last_lower_y_idx = 0 if last_edge_y1 < last_edge_y2 else 1
    # # The closest edge in the current loop
# cur_edge_y1 = vs[cur_edge[0]][1]
# cur_edge_y2 = vs[cur_edge[1]][1]
# cur_lower_y_idx = 0 if cur_edge_y1 < cur_edge_y2 else 1
    # # Recombine the two edges according to their y values
# edge_1 = [[last_edge[last_lower_y_idx], cur_edge[cur_lower_y_idx]]]
# edge_2 = [[last_edge[1-last_lower_y_idx], cur_edge[1-cur_lower_y_idx]]]
    # # Rebuild last_edges
# last_edges = last_edges[:min_index] + last_edges[min_index+1:] + cur_edges[:cur_e_idx] + cur_edges[cur_e_idx+1:]
# last_edges = last_edges + edge_1 + edge_2
    # if i + 1 < n_circle:  # only rebuild the tree when more loops remain
# vs_edges = vs[last_edges].reshape(len(last_edges), -1)
# tree = spatial.KDTree(vs_edges)
#
    # # Stitch the vertices into one closed loop along the edges
# circle_pts = [[]]
# count = 0
# while len(last_edges) > 0:
# if len(circle_pts[count]) == 0:
# circle_pts[count] = list(last_edges[0])
# last_edges = np.delete(last_edges, 0, axis=0)
# else:
# last_id = circle_pts[count][-1]
# idx = np.where(last_edges == last_id)[0]
    # # no connecting edge found
# if len(idx) == 0:
# circle_pts.append([])
# count += 1
# else:
# edge = last_edges[idx[0]]
# next_id = edge[0] if edge[0] != last_id else edge[1]
# circle_pts[count].append(next_id)
# last_edges = np.delete(last_edges, idx[0], axis=0)
# pts_ids = []
# for circle in circle_pts:
    # # filter out short loops
# if len(circle) > threshold:
# # print("{}".format(len(circle)))
    # circle = drop_cycle(circle, threshold)  # remove small closed loops
# # print("after drop cycle {}".format(len(circle)))
# pts_ids.append(circle)
# return pts_ids
def drop_cycle(edge, max_length=20):
"""
    Remove the small closed loops formed in the list.
    @edge: original vertex ids
    @max_length: loops shorter than this length are removed
    return: the list with small closed loops removed
"""
drop_list = []
drop_count = 0
for i, item in enumerate(edge):
if item not in drop_list:
drop_list.append(item)
else:
last_index = len(drop_list) - 1 - drop_list[::-1].index(item)
if i - last_index - drop_count < max_length:
drop_count += len(drop_list[last_index:])
drop_list = drop_list[:last_index+1]
else:
drop_list.append(item)
    # Remove the closed loop formed by head and tail, e.g. [956 1035 1538 ...... 2028 1035 952 956] ==> 1035->952->956->1035
circle_count = np.where(np.bincount(drop_list) >= 2)[0]
for item in circle_count:
if item == drop_list[0]:
continue
try:
first_id = drop_list.index(item)
last_id = drop_list[::-1].index(item)
if first_id + last_id <= max_length:
length = len(drop_list)
drop_list = drop_list[first_id:length-last_id]
except ValueError:
continue
return np.asarray(drop_list)
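# Worked example: drop_cycle([1, 2, 3, 2, 4], max_length=20) collapses the
# short 2 -> 3 -> 2 revisit and returns array([1, 2, 4]).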
# def label_pts_by_edges_and_faces(vs, edges, faces, face_labels):
# """
# 根据边和面标签,对点进行标注,一条边对应两个面,如果两个面标签不同,则保留点
# @vs: 模型的点
# @edges: 模型的边
# @faces: 模型的面
# @face_labels: 模型面对应的标签
# return: 模型边界点
# """
# pts_labels = np.array(len(vs) * [False])
# for ei, edge in enumerate(edges):
# pt1 = edge[0]
# pt2 = edge[1]
# face_ids = find_faces_by_2point(faces, pt1, pt2)
# if len(face_ids) == 2:
# if face_labels[face_ids[0][0]] != face_labels[face_ids[1][0]]:
# pts_labels[pt1] = True
# pts_labels[pt2] = True
#
# return vs[pts_labels]
def label_pts_by_edges_and_faces(vs, edges, faces, face_labels):
"""
    Label vertices using edge and face labels; each edge borders two faces,
    and if the two face labels differ the vertex is kept.
    @vs: vertices of the model
    @edges: edges of the model
    @faces: faces of the model
    @face_labels: labels of the model faces
    return: boundary vertices of the model
"""
# pts_labels = np.array(len(vs) * [False])
edge_idx = []
for ei, edge in enumerate(edges):
pt1 = edge[0]
pt2 = edge[1]
face_ids = find_faces_by_2point(faces, pt1, pt2)
        # TODO boundary edges may be wrongly dropped here
if len(face_ids) == 2:
if face_labels[face_ids[0][0]] != face_labels[face_ids[1][0]]:
edge_idx.append(ei)
test_edges = np.asarray(edges[edge_idx])
# print("test_edges:", len(test_edges))
pts_ids = get_pts_from_edges_vs(test_edges, vs, 10)
# np.savetxt("./pts_ids.txt", pts_ids, fmt="%d")
# np.savetxt("./vs.txt", vs)
# pts_ids = get_pts_from_edges(test_edges)
# print("pts_ids: ", pts_ids)
res_vs = np.array([])
    face_normals, face_areas = compute_face_normals_and_areas(vs, faces)  # compute per-face normals (and areas)
for idx, pts_id in enumerate(pts_ids):
# idx = np.append(idx, pts_id)
temp = []
temp.append(pts_id[0])
for i in range(1, len(pts_id) - 1):
last_pt = pts_id[i - 1]
cur_pt = pts_id[i]
next_pt = pts_id[i + 1]
a = vs[last_pt] - vs[cur_pt]
b = vs[next_pt] - vs[cur_pt]
y = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
x = math.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]) * math.sqrt(b[0] * b[0] + b[1] * b[1] + b[2] * b[2])
            # angle formed by the three consecutive points
            theta = math.acos(y / x) / math.pi * 180  # no coincident points, so x cannot be zero
if theta > 50:
curvature = compute_pt_curvature(vs, edges, faces, face_normals, cur_pt)
if max(curvature) > 0:
temp.append(cur_pt)
temp.append(pts_id[-1])
res_vs = np.append(res_vs, vs[temp])
if idx != len(pts_ids) - 1:
res_vs = np.append(res_vs, np.array([0, 0, 0]))
return res_vs
def compute_face_normals_and_areas(vs, faces):
"""
    Compute the normal vector and area of every face
"""
face_normals = np.cross(vs[faces[:, 1]] - vs[faces[:, 0]],
vs[faces[:, 2]] - vs[faces[:, 1]])
# >>> deal zero face >>>
    zeros_idx = np.argwhere((face_normals[:, 0] == 0) & (face_normals[:, 1] == 0) & (face_normals[:, 2] == 0))
#Extract features for static gestures recognition
import cv2 as cv
import numpy as np
from time import time
import joblib  # sklearn.externals.joblib was removed in scikit-learn >= 0.23
import os
from scipy.cluster.vq import *
from sklearn.preprocessing import StandardScaler
import sys
from tqdm import tqdm
class HOGFeatures:
def __init__(self):
#image = cv.imread("test.jpg",0)
self.winSize = (80,80)
self.blockSize = (16,16)
self.blockStride = (8,8)
self.cellSize = (8,8)
self.nbins = 9
self.derivAperture = 1
self.winSigma = 4.
self.histogramNormType = 0
self.L2HysThreshold = 2.0000000000000001e-01
self.gammaCorrection = 1
#nlevels = 10
self.hog = cv.HOGDescriptor(self.winSize,self.blockSize,self.blockStride,self.cellSize,self.nbins,self.derivAperture,self.winSigma,self.histogramNormType,self.L2HysThreshold,self.gammaCorrection)#,nlevels)
#
self.winStride = (16,16)
self.padding = (8,8)
def dimensionReductionPCA(self, feature):
#pca = cv.PCACompute(feature, mean=None, maxComponents=1000)
#pca = cv.PCA(feature, mean=None, maxComponents=1000)
t0 = time()
mean, eigenvectors = cv.PCACompute(feature, mean=None, maxComponents=1000)
print("PCA compute took ", time()-t0,"seconds to run")
#reduced_features = pca.project(feature)
t1 = time()
reduced_features = cv.PCAProject(feature, mean, eigenvectors)
print("PCA project took ", time()-t1,"seconds to run")
return reduced_features
def extractFeatures(self, image):
im = cv.resize(image,(80, 80), interpolation = cv.INTER_CUBIC)
fd = self.hog.compute(im,self.winStride,self.padding)
return fd
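# Note: with winSize=(80, 80), blockSize=(16, 16), blockStride=(8, 8),
# cellSize=(8, 8) and 9 bins, each 80x80 window yields 9x9 blocks * 4 cells
# * 9 bins = 2916 HOG values; the (8, 8) padding and (16, 16) winStride passed
# to compute() can evaluate several such windows per image.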
class HOGPCAFeatures:
def __init__(self):
#image = cv.imread("test.jpg",0)
self.winSize = (80,80)
self.blockSize = (16,16)
self.blockStride = (8,8)
self.cellSize = (8,8)
self.nbins = 9
self.derivAperture = 1
self.winSigma = 4.
self.histogramNormType = 0
self.L2HysThreshold = 2.0000000000000001e-01
self.gammaCorrection = 1
#nlevels = 10
self.hog = cv.HOGDescriptor(self.winSize,self.blockSize,self.blockStride,self.cellSize,self.nbins,self.derivAperture,self.winSigma,self.histogramNormType,self.L2HysThreshold,self.gammaCorrection)#,nlevels)
#
self.winStride = (16,16)
self.padding = (8,8)
self.pcapath = "/media/1tb/datasets/libras_configurations/chroma_videos/features/"
self.mean_name = "pcahog_skin.mean"
self.eigenvectors_names = "pcahog_skin.eigv"
self.mean_path = os.path.join(self.pcapath, self.mean_name)
self.eigenvectors_path = os.path.join(self.pcapath, self.eigenvectors_names)
self.reduced_features_name = "reduced_features1000.feat"
self.features_path = os.path.join(self.pcapath, self.reduced_features_name)
#self.mean = joblib.load(self.mean_path)
#self.eigenvector = joblib.load(self.eigenvectors_path)
def computePCA(self, feature):
#pca = cv.PCACompute(feature, mean=None, maxComponents=1000)
#pca = cv.PCA(feature, mean=None, maxComponents=1000)
t0 = time()
mean, eigenvectors = cv.PCACompute(feature, mean=None, maxComponents=2000)
print("PCA compute took ", time()-t0,"seconds to run")
#reduced_features = pca.project(feature)
print("Saving Means and eigenvectors")
#joblib.dump(mean, self.mean_path, compress=True)
#joblib.dump(eigenvectors, self.eigenvectors_path, compress=True)
return mean, eigenvectors
def projectPCA(self, mean, eigenvectors, feature):
t1 = time()
reduced_features = cv.PCAProject(feature, mean, eigenvectors)
print("PCA project took ", time()-t1,"seconds to run")
return reduced_features
def extractFeatures(self, image):
im = cv.resize(image,(80, 80), interpolation = cv.INTER_CUBIC)
fd = self.hog.compute(im,self.winStride,self.padding)
return fd
def extract(self, image):
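        # NOTE: this method assumes self.mean and self.eigenvector have been
        # loaded (see the commented-out joblib.load calls in __init__);
        # otherwise it raises AttributeError.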
#print(self.mean.shape)
#print(self.eigenvector.shape)
features = self.extractFeatures(image)
print(features.shape)
features = np.transpose(features)
print(features.shape)
reduced_features = self.projectPCA(self.mean, self.eigenvector, features)
return reduced_features
class HUFeatures:
def __init__(self):
self.initial =0
def getContours(self, img):
contours,hierarchy = cv.findContours(img,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
#contours_retain=[]
#for cnt in contours:
# if cv.contourArea(cnt)>500:
# contours_retain.append(cnt)
c = max(contours, key = cv.contourArea)
#cv.drawContours(img,contours_retain,-1,(255,0,255),3)
return c
def extractFeatures(self, img):
image = cv.resize(img,(200, 200), interpolation = cv.INTER_CUBIC)
c = self.getContours(image)
#for cnt in contours_retain:
# print(cv.HuMoments(cv.moments(cnt)).flatten())
ft = cv.HuMoments(cv.moments(c)).flatten()
return ft
class GaborFeatures:
def __init__(self):
self.x = 0
self.filters = self.build_filters()
# define gabor filter bank with different orientations and at different scales
def build_filters(self):
filters = []
ksize = 9
#define the range for theta and nu
for theta in np.arange(0, np.pi, np.pi / 8):
for nu in np.arange(0, 6*np.pi/4 , np.pi / 4):
kern = cv.getGaborKernel((ksize, ksize), 1.0, theta, nu, 0.5, 0, ktype=cv.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
#function to convolve the image with the filters
def process(self, img, filters):
accum = np.zeros_like(img)
for kern in filters:
fimg = cv.filter2D(img, cv.CV_8UC3, kern)
np.maximum(accum, fimg, accum)
return accum
def extractFeatures(self, img):
#instantiating the filters
img = cv.resize(img,(128, 128), interpolation = cv.INTER_CUBIC)
#filters = self.build_filters()
f = np.array(self.filters, dtype='int64')
#initializing the feature vector
feat = []
#calculating the local energy for each convolved image
        for j in range(40):  # NOTE: build_filters() creates 8*6 = 48 kernels; only the first 40 are used
res = self.process(img, f[j])
#print(res.shape)
temp = np.dtype('int64').type(0)
for p in range(128):
for q in range(128):
temp = temp + res[p][q]*res[p][q]
#print(temp)
feat.append(temp)
#calculating the mean amplitude for each convolved image
for j in range(40):
res = self.process(img, f[j])
#print(res.shape)
            temp = np.dtype('int64').type(0)
import pandas as pd
import os
import logging
import pickle
import numpy as np
import multiprocessing
import spikeinterface.extractors as se
import spikeinterface.sorters as ss
logger = logging.getLogger('ceciestunepipe.util.spike.kilosort')
N_JOBS_MAX = multiprocessing.cpu_count()-1
def run_spikesort(recording_extractor: se.RecordingExtractor,
logger: logging.Logger,
sort_pickle_path: str,
tmp_dir: str,
grouping_property: str=None,
sorting_method: str='kilosort3',
n_jobs_bin: int=N_JOBS_MAX,
chunk_mb: int=512, restrict_to_gpu=None,
**sort_kwargs):
logger.info("Grouping property: {}".format(grouping_property))
logger.info("sorting method: {}".format(sorting_method))
# try:
if sorting_method == "kilosort2":
# perform kilosort sorting
sort_tmp_dir = os.path.join(tmp_dir, 'tmp_ks2')
logger.info('Sorting tmp dir {}'.format(sort_tmp_dir))
if restrict_to_gpu is not None:
logger.info('Will set visible gpu devices {}'.format(restrict_to_gpu))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(restrict_to_gpu)
sort = ss.run_kilosort2(
recording_extractor,
car=True,
output_folder=sort_tmp_dir,
parallel=True,
verbose=True,
grouping_property=grouping_property,
chunk_mb=chunk_mb,
n_jobs_bin=n_jobs_bin,
**sort_kwargs
)
elif sorting_method == "kilosort3":
# perform kilosort sorting
sort_tmp_dir = os.path.join(tmp_dir, 'tmp_ks3')
logger.info('Sorting tmp dir {}'.format(sort_tmp_dir))
if restrict_to_gpu is not None:
logger.info('Will set visible gpu devices {}'.format(restrict_to_gpu))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(restrict_to_gpu)
sort = ss.run_kilosort3(
recording_extractor,
car=True,
output_folder=sort_tmp_dir,
parallel=True,
verbose=True,
grouping_property=grouping_property,
chunk_mb=chunk_mb,
**sort_kwargs
)
else:
raise NotImplementedError('Only know how to sort kilosort2/3 for now, \
will deal with {} later'.format(sorting_method))
logger.info('done sorting')
# # save sort
# logger.info("Saving sort {}".format(sort_pickle_path))
# with open(sort_pickle_path, "wb") as output:
# pickle.dump(sort, output, pickle.HIGHEST_PROTOCOL)
# logger.info("Sorting output saved to {}".format(sort_pickle_path))
# # save sort again with all that processed data
# sort_temp_pickle_path = sort_pickle_path + '.dump.pkl'
# logger.info("Saving sort {}".format(sort_temp_pickle_path))
# sort.dump_to_pickle(sort_temp_pickle_path)
return sort
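# Usage sketch (paths and GPU index are hypothetical): given a SpikeInterface
# RecordingExtractor `rec`,
#   sort = run_spikesort(rec, logger, sort_pickle_path='/tmp/sort.pkl',
#                        tmp_dir='/tmp', sorting_method='kilosort3',
#                        restrict_to_gpu=0)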
def load_spikes(ks_folder:str) -> tuple:
spk_dict = {k: np.load(os.path.join(ks_folder,
'spike_{}.npy'.format(k))).flatten() for k in ['times', 'clusters']}
spk_dict['cluster_id'] = spk_dict['clusters']
spk_df = pd.DataFrame(spk_dict)
clu_df = pd.read_csv(os.path.join(ks_folder, 'cluster_KSLabel.tsv'),
sep='\t', header=0)
# get the templates
templ_arr = np.load(os.path.join(ks_folder, 'templates.npy'))
clu_df['template'] = [x for x in templ_arr]
# with the templates, compute the sorted chanels, main channel, main 7 channels and waveform for the 7 channels
clu_df['max_chans'] = clu_df['template'].apply(lambda x: np.argsort(np.ptp(x, axis=0))[::-1])
clu_df['main_chan'] = clu_df['max_chans'].apply(lambda x: x[0])
    clu_df['main_7'] = clu_df['max_chans'].apply(lambda x: np.sort(x[:7]))
import numpy as _np
import math as _math
def read_energy_acceptance_file(fname, eRF):
# reads raw data from file
lines = [line.strip() for line in open(fname)]
# processes raw data
accp, accn = [], []
for line in lines:
if not line or line[0] == '#':
continue
values = [float(word) for word in line.split()]
pos, e_ac = values[4], values[7]
if e_ac > 0.0:
accp.append([pos,min(abs(e_ac),eRF)])
else:
accn.append([pos,min(abs(e_ac),eRF)])
accp = _np.array(accp)
accn = _np.array(accn)
return (accp,accn)
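# Usage sketch (file name and eRF value are hypothetical): the positive and
# negative acceptances come back as (N, 2) arrays of [position, |acceptance|
# capped at eRF]:
#   accp, accn = read_energy_acceptance_file('energy_acceptance.txt', eRF=0.05)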
def read_twiss_file(fname, param):
# reads raw data from file
lines = [line.strip() for line in open(fname)]
# processes raw data into twiss and element structures
twiss, elements = [], []
for line in lines:
words = line.split()
if not words or words[0][0] == '*':
continue
if words[0][0] == '#':
if words[0] == '#I1':
param['I1'] += float(words[1])
elif words[0] == '#I2':
param['I2'] += float(words[1])
elif words[0] == '#I3':
param['I3'] += float(words[1])
elif words[0] == '#I4':
param['I4'] += float(words[1])
elif words[0] == '#I5':
param['I5'] += float(words[1])
elif words[0] == '#I6':
param['I6'] += float(words[1])
else:
pass
continue
if words[0][0] == '@':
if words[1] == 'K_beta':
param['k_beta'] = float(words[3])
elif words[1] == 'K_dw':
param['k_dw'] = float(words[3])-param['k_beta']
elif words[1] == 'EX':
param['ex0'] = float(words[3])
else:
if float(words[3]) > 0:
values = [float(word) for word in words[2:]]
values = values + [0,0] # for acceptances insertion latter on
#print(values)
twiss.append(values)
elements.append(words[0])
twiss = _np.array(twiss)
    elements = np.array(elements)
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import inv, solve
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from numpy.testing.noseclasses import KnownFailureTest
from scipy.linalg import solve_sylvester, solve_lyapunov, \
solve_discrete_lyapunov, solve_continuous_are, solve_discrete_are
class TestSolveLyapunov(TestCase):
cases = [
(np.array([[1, 2], [3, 4]]),
np.array([[9, 10], [11, 12]])),
# a, q all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a real; q complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a complex; q real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0, 2.0],[-1.0, 2.0]])),
# An example from Kitagawa, 1977
(np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
[1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
[0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
# Companion matrix example. a complex; q real; a.shape[0] = 11
(np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
0.010+0.j],
[1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j]]),
np.eye(11)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
]
def check_continuous_case(self, a, q):
x = solve_lyapunov(a, q)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
def check_discrete_case(self, a, q, method=None):
x = solve_discrete_lyapunov(a, q, method=method)
assert_array_almost_equal(np.dot(np.dot(a, x),a.conj().transpose()) - x, -1.0*q)
def test_cases(self):
for case in self.cases:
self.check_continuous_case(case[0], case[1])
self.check_discrete_case(case[0], case[1])
self.check_discrete_case(case[0], case[1], method='direct')
self.check_discrete_case(case[0], case[1], method='bilinear')
class TestSolveContinuousARE(TestCase):
cases = [
# An example from <NAME>.
# (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
(np.matrix([[0, 1], [0, 0]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Difficult from a numerical standpoint, again from <NAME>.
(np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
np.matrix([[1,], [-1,]]),
np.matrix([[9, 6], [6, 4]]),
np.matrix([[1,],])),
# Complex a; real b, q, r
(np.matrix([[0, 1-2j], [0, -3j]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, q, r; complex b
(np.matrix([[0, 1], [0, -1]]),
np.matrix([[-2j,], [1j,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, b; complex q, r
(np.matrix([[0, 1], [0, -1]]),
np.matrix([[1, 2], [1, 3]]),
np.matrix([[1, -3j], [1-1j, 2]]),
np.matrix([[-2j, 2], [1j, 3]])),
]
def check_case(self, a, b, q, r):
"""Checks if (A'X + XA - XBR^-1B'X+Q=0) is true"""
x = solve_continuous_are(a, b, q, r)
assert_array_almost_equal(
a.getH()*x + x*a - x*b*inv(r)*b.getH()*x + q, 0.0)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2], case[3])
def test_solve_discrete_are():
cases = [
# Darex examples taken from (with default parameters):
# [1] P.BENNER, <NAME>, <NAME>: 'A Collection of Benchmark
# Examples for the Numerical Solution of Algebraic Riccati
# Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
# <NAME>, TU Chemnitz-Zwickau (Germany), 1995.
# [2] <NAME>, <NAME>, <NAME>: 'Scaling of the
# Discrete-Time Algebraic Riccati Equation to Enhance Stability
# of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
#
# The format of the data is (a, b, q, r, knownfailure), where
# knownfailure is None if the test passes or a string
# indicating the reason for failure.
#
# Complex a; real b, q, r
(np.array([[2, 1-2j], [0, -3j]]),
np.array([[0,], [1,]]),
np.array([[1, 0], [0, 2]]),
np.array([[1,],]),
None),
# Real a, q, r; complex b
(np.array([[2, 1], [0, -1]]),
np.array([[-2j,], [1j,]]),
np.array([[1, 0], [0, 2]]),
np.array([[1,],]),
None),
# Real a, b; complex q, r
(np.array([[3, 1], [0, -1]]),
np.array([[1, 2], [1, 3]]),
np.array([[1, 1+1j], [1-1j, 2]]),
np.array([[2, -2j], [2j, 3]]),
None),
# User-reported gh-2251 (Trac #1732)
(np.array([[0.63399379, 0.54906824, 0.76253406],
[0.5404729, 0.53745766, 0.08731853],
[0.27524045, 0.84922129, 0.4681622]]),
np.array([[0.96861695],[0.05532739],[0.78934047]]),
np.eye(3),
np.eye(1),
None),
# darex #1
(np.array([[4, 3],[-4.5, -3.5]]),
np.array([[1],[-1]]),
np.array([[9, 6],[6, 4]]),
np.array([[1]]),
None),
# darex #2
(np.array([[0.9512, 0],[0, 0.9048]]),
np.array([[4.877, 4.877],[-1.1895, 3.569]]),
np.array([[0.005, 0],[0, 0.02]]),
np.array([[1/3, 0],[0, 3]]),
None),
# darex #3
(np.array([[2, -1],[1, 0]]),
np.array([[1],[0]]),
np.array([[0, 0],[0, 1]]),
np.array([[0]]),
None),
# darex #4 (skipped the gen. Ric. term S)
(np.array([[0, 1],[0, -1]]),
np.array([[1, 0],[2, 1]]),
np.array([[-4, -4],[-4, 7]]) * (1/11),
np.array([[9, 3],[3, 1]]),
None),
# darex #5
(np.array([[0, 1],[0, 0]]),
np.array([[0],[1]]),
np.array([[1, 2],[2, 4]]),
np.array([[1]]),
None),
# darex #6
(np.array([[0.998, 0.067, 0, 0],
[-.067, 0.998, 0, 0],
[0, 0, 0.998, 0.153],
[0, 0, -.153, 0.998]]),
np.array([[0.0033, 0.0200],
[0.1000, -.0007],
[0.0400, 0.0073],
[-.0028, 0.1000]]),
np.array([[1.87, 0, 0, -0.244],
[0, 0.744, 0.205, 0],
[0, 0.205, 0.589, 0],
[-0.244, 0, 0, 1.048]]),
np.eye(2),
None),
# darex #7
(np.array([[0.984750, -.079903, 0.0009054, -.0010765],
[0.041588, 0.998990, -.0358550, 0.0126840],
[-.546620, 0.044916, -.3299100, 0.1931800],
[2.662400, -.100450, -.9245500, -.2632500]]),
np.array([[0.0037112, 0.0007361],
[-.0870510, 9.3411e-6],
[-1.198440, -4.1378e-4],
[-3.192700, 9.2535e-4]]),
np.eye(4)*1e-2,
np.eye(2),
None),
# darex #8
(np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
[1.0000000, 0.6000000, 0.8000000, 3.3999820],
[0.0000000, 1.0000000, 1.8000000, 3.7999820],
[0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
np.array([[1.0, -1.0, -1.0, -1.0],
[0.0, 1.0, -1.0, -1.0],
[0.0, 0.0, 1.0, -1.0],
[0.0, 0.0, 0.0, 1.0]]),
np.array([[2, 1, 3, 6],
[1, 2, 2, 5],
[3, 2, 6, 11],
[6, 5, 11, 22]]),
np.eye(4),
None),
# darex #9
(np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
[40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
[12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
[4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
[0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
np.array([[0.0434, -0.0122],
[2.6606, -1.0453],
[3.7530, -5.5100],
[3.6076, -6.6000],
[0.4617, -0.9148]]) * 0.01,
np.eye(5),
np.eye(2),
None),
# darex #10
(np.kron(np.eye(2),np.diag([1,1],k=1)),
np.kron(np.eye(2),np.array([[0],[0],[1]])),
np.array([[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0],
[0, 0, 0, -1, 1, 0],
[0, 0, 0, 0, 0, 0]]),
np.array([[3, 0],[0, 1]]),
None),
# darex #11
(0.001 * np.array(
[[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
[76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
[-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
[-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
[-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
[-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
[-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
[-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
[-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
np.array([[4.7600, -0.5701, -83.6800],
[0.8790, -4.7730, -2.7300],
[1.4820, -13.1200, 8.8760],
[3.8920, -35.1300, 24.8000],
[10.3400, -92.7500, 66.8000],
[7.2030, -61.5900, 38.3400],
[4.4540, -36.8300, 20.2900],
[1.9710, -15.5400, 6.9370],
[3.7730, -30.2800, 14.6900]]) * 0.001,
np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
np.eye(3),
None),
# darex #12
(np.array([[0, 1e6],[0, 0]]),
np.array([[0],[1]]),
np.eye(2),
np.array([[1]]),
"Bad absolute accuracy"),
# darex #13
(np.array([[16, 10, -2],
[10, 13, -8],
[-2, -8, 7]]) * (1/9),
np.eye(3),
1e6 * np.eye(3),
1e6 * np.eye(3),
"Fails to find a valid solution"),
# darex #14
(np.array([[1 - 1/1e8, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]),
np.array([[1e-08],[0],[0],[0]]),
np.diag([0, 0, 0, 1]),
np.array([[0.25]]),
"Bad absolute accuracy"),
# darex #15
(np.eye(100, k=1),
np.flipud(np.eye(100, 1)),
np.eye(100),
np.array([[1]]),
None)
]
def _test_factory(case):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, knownfailure = case
if knownfailure:
raise KnownFailureTest(knownfailure)
x = solve_discrete_are(a, b, q, r)
res = a.conj().T.dot(x.dot(a)) - x + q
res -= a.conj().T.dot(x.dot(b)).dot(
solve(r+b.conj().T.dot(x.dot(b)),b.conj().T).dot(x.dot(a))
)
assert_array_almost_equal(res,np.zeros_like(res))
for case in cases:
yield _test_factory, case
class TestSolveSylvester(TestCase):
cases = [
# a, b, c all real.
(np.array([[1, 2], [0, 4]]),
np.array([[5, 6], [0, 8]]),
np.array([[9, 10], [11, 12]])),
# a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
# quasi-triangular form.
(np.array([[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]]),
np.array([[2.0, 0, 0,1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]]),
np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])),
# a, b, c all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 2j], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a and b real; c complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
from __future__ import print_function, division
import numpy as np
import R_smooth as sm
from matplotlib import pyplot as plt
Eradius = 6371315.0 # Earth radius as used in Croco (cf. scalars.h), m
def get_vslab(xm,ym,Lslab,lcenter,dl,ang,xx,yy,doverb=True):
# define lon, lat points ov vertical section
(indxm,indym) = np.unravel_index(((xx-xm)**2+(yy-ym)**2).argmin(),xx.shape)
Nl = int(Lslab/dl) + int((Lslab//dl)%2==0) # odd
lonslab = xm + np.arange(-(Nl//2)*dl-lcenter,(Nl//2+1)*dl-lcenter,dl)\
/Eradius*np.cos(ang)*180./np.pi
    latslab = ym + np.arange(-(Nl//2)*dl-lcenter,(Nl//2+1)*dl-lcenter,dl)
import pandas as pd
import numpy as np
import math, statistics, functools
import matplotlib.pyplot as plt
from sklearn.metrics.cluster import adjusted_rand_score
def read_csv_input(filename):
df = pd.read_csv(filename, header = None).to_numpy()
y = df[:, [0]]
X = df[:, range(1, df.shape[1])]
return X, y
def normalize(X):
means = np.mean(X, axis=0)
    stds = np.std(X, axis=0)
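    # assumed completion: z-score the columns with the means/stds computed above
    return (X - means) / stds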
import os
import glob
import time
from datetime import datetime
import pandas as pd
import torch
import numpy as np
import argparse
import gym
from env.gym_cancer.envs.cancercontrol import CancerControl
import matplotlib.pyplot as plt
# import pybullet_envs
import seaborn as sns
from PPO import PPO
def set_device(cuda=None):
print("============================================================================================")
# set device to cpu or cuda
device = torch.device('cpu')
if torch.cuda.is_available() and cuda is not None:
device = torch.device('cuda:' + str(cuda))
torch.cuda.empty_cache()
print("Device set to : " + str(torch.cuda.get_device_name(device)))
else:
print("Device set to : cpu")
print("============================================================================================")
return device
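# Hedged usage sketch (not in the original script): request GPU 0 and fall
# back to CPU when CUDA is unavailable.
# device = set_device(cuda=0)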
#################################### Testing ###################################
def plot_figure(data, save_path, par = 0):
if par:
save_name = "_best_survival_time.png"
else:
save_name = '_best_reward.png'
states = data["states"]
doses = data["doses"]
cs = sns.color_palette('Paired')
x = np.arange(states.shape[0]) * 28
ad = states[:, 0]
ai = states[:, 1]
psa = states[:, 2]
fig = plt.figure(figsize=(21, 7))
plt.style.use('seaborn')
plt.style.use(['science', "nature"])
ax1 = fig.add_subplot(1, 2, 1)
ax1.plot(x, psa, linestyle="-", linewidth=2)
nadir_psa = min(psa)
nadir_psa_x = x[np.where(psa == nadir_psa)]
import numpy as np
import cv2
import os
import dlib
from imutils.face_utils import rect_to_bb, shape_to_np, FaceAligner
from imutils.face_utils.helpers import FACIAL_LANDMARKS_IDXS
import matplotlib.pyplot as plt
path = '/home/yaurehman2/Documents/stereo_face_liveness/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(path)
fa = FaceAligner(predictor, desiredFaceWidth=700)
def get_dataset(paths, filename): # this function gets the dataset from the given path
# This function generates a txt file that is used by the video generating function. The txt file contains paths
# to the video data.
labels = []
file1 = open(filename, "w+")
for path in paths.split(':'):
if path != '':
path_exp = os.path.expanduser(path)
h_subjects = os.listdir(path_exp) # list all the files and directories inside in the path
h_subjects.sort() # sort all the directories
nr_of_subjects = len(h_subjects) # find the length of all the directories
for i in range(nr_of_subjects): # count upto the number of directories
h_subject_num = h_subjects[i] # select the ith directory
vid_dir = os.path.join(path_exp, h_subject_num) # getting the full path of the directory
if os.path.isdir(vid_dir): # check whether the given path is a directory or not
nr_of_videos = os.listdir(vid_dir) # list all the files in that directory
nr_of_videos.sort()
for index in nr_of_videos:
sub_index = index.split('_')
if sub_index[2] == 'real': # first class
labels.append([os.path.join(vid_dir, index), 0])
file1.write('%s %d \n' %(os.path.join(vid_dir,index),int(0)))
elif sub_index[2] == 'print': # second class
labels.append([os.path.join(vid_dir, index), 1])
file1.write('%s %d \n' % (os.path.join(vid_dir, index), int(1)))
elif sub_index[2] == 'cut': # 3rd class
labels.append([os.path.join(vid_dir, index), 2])
file1.write('%s %d \n' % (os.path.join(vid_dir, index), int(2)))
elif sub_index[2] == 'mobile': # 4th class
labels.append([os.path.join(vid_dir, index), 3])
file1.write('%s %d \n' % (os.path.join(vid_dir, index), int(3)))
elif sub_index[2] == 'tablet': # 5th class
labels.append([os.path.join(vid_dir, index), 4])
file1.write('%s %d \n' % (os.path.join(vid_dir, index), int(4)))
else:
vid_descrip = vid_dir.strip().split('/')
sub_index = vid_descrip[7].strip().split('_')
if sub_index[2] == 'real': # first class
labels.append([vid_dir, 0])
file1.write('%s %d \n' % (vid_dir, int(0)))
elif sub_index[2] == 'print': # second class
labels.append([vid_dir, 1])
file1.write('%s %d \n' % (vid_dir, int(1)))
elif sub_index[2] == 'cut': # 3rd class
labels.append([vid_dir, 2])
file1.write('%s %d \n' % (vid_dir, int(2)))
elif sub_index[2] == 'mobile': # 4th class
labels.append([vid_dir, 3])
file1.write('%s %d \n' % (vid_dir, int(3)))
elif sub_index[2] == 'tablet': # 5th class
labels.append([vid_dir, 4])
file1.write('%s %d \n' % (vid_dir, int(4)))
file1.close()
# ---------------------------------------------------------------------------------------------------------------------
def alignImages(im1, im2):
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
cv2.imwrite("matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
return im1Reg, h
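# Hedged usage sketch (file names are hypothetical, not from the original
# code): warp a moving image onto a reference with the ORB + RANSAC pipeline above.
# reference = cv2.imread("reference.jpg")
# moving = cv2.imread("moving.jpg")
# aligned, h = alignImages(moving, reference)
# cv2.imwrite("aligned.jpg", aligned)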
# ---------------------------------------------------------------------------------------------------------------------
# Function to extract video frames and put them in a corresponding separate directory
def video_data(file_path, frame_length, seed_input, img_rows, img_cols):
video_matrix_ = [] # define a video matrix to store video data
# video_matrix_l = []
database_matrix = [] # define the database matrix to store videos along with frames
capr = cv2.VideoCapture(file_path[0])
capl = cv2.VideoCapture(file_path[1])
video_r_length = int(capr.get(cv2.CAP_PROP_FRAME_COUNT))
video_r_width = int(capr.get(cv2.CAP_PROP_FRAME_WIDTH))
video_r_height = int(capr.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_r_fps = capr.get(cv2.CAP_PROP_FPS)
video_l_length = int(capl.get(cv2.CAP_PROP_FRAME_COUNT))
video_l_width = int(capl.get(cv2.CAP_PROP_FRAME_WIDTH))
video_l_height = int(capl.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_l_fps = capl.get(cv2.CAP_PROP_FPS)
print('length of the video = %g ---- height x width = %d x %d --- fps =%g' % (
video_r_length, video_r_height, video_r_width, video_r_fps))
print('length of the video = %g ---- height x width = %d x %d --- fps =%g' % (
video_l_length, video_l_height, video_l_width, video_l_fps))
counter = 0
starting_point = 0
ret_false_count = 0
while capr.isOpened() and capl.isOpened(): # Read all frames of the video
ret_r, frame_r = capr.read()
ret_l, frame_l = capl.read()
if (counter != (video_r_length-ret_false_count)) & (counter != (video_l_length - ret_false_count)):
# if (counter != frame_length):
if ret_r and ret_l:
# imReg, h = alignImages(frame_l, frame_r)
tensor_translated, check_face_id, _ = facial_tensor_landmarks(frame_r, frame_l, img_rows, img_cols)
print (counter, check_face_id)
if check_face_id != 0:
video_matrix_.append(tensor_translated)
# video_matrix_l.append(tensor_translated[:,:,3:6])
counter += 1
else:
ret_false_count += 1
else:
ret_false_count += 1
break
else:
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
capr.release()
capl.release()
np.random.seed(seed_input)
if len(video_matrix_)>10:
dummy = np.random.randint(0, len(video_matrix_), len(video_matrix_))
print (dummy)
counter_b = 0
for i in dummy:
selected_frames = video_matrix_[i]
# selected_l_frames = video_matrix_l[i]
# ----------------------------------------------------------------------------------------------------
# print selected_l_frames.dtype
# # for checking the output detected face. It can be commented
# comb_frames = np.concatenate((selected_r_frames,selected_l_frames), axis=1)
# cv2.imshow('min_frame', comb_frames)
# cv2.waitKey()
# -----------------------------------------------------------------------------------------------------
# tensor_translated, flg = facial_tensor_landmarks(selected_r_frames, selected_l_frames, img_rows, img_cols)
database_matrix.append(selected_frames)
counter_b += 1
print (np.asarray(database_matrix).shape)
if counter_b == frame_length:
break
# -----------------------------------------------------------------------------------------------------
# print tensor_translated.shape
# comb_frames = np.concatenate((tensor_translated[:, :, 0:3], tensor_translated[:, :, 3:6]), axis=1)
# cv2.imshow('min_frame', comb_frames)
# cv2.imshow(tensor_translated[:, :, 0:6:2])
# cv2.waitKey()
# ------------------------------------------------------------------------------------------------------
return np.asarray(database_matrix)
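# Hedged usage sketch (paths and sizes are illustrative, not from the original
# code): sample up to `frame_length` aligned stereo face frames; each frame is
# img_rows x img_cols x 6 (right-camera image stacked with the translated left image).
# clips = video_data(['subj_01_real_R.avi', 'subj_01_real_L.avi'],
#                    frame_length=10, seed_input=0, img_rows=224, img_cols=224)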
def facial_tensor_landmarks(frame_r, frame_l, img_rows, img_cols):
img_list = [frame_r, frame_l]
right_Eye_translate = []
bounding_boxes = []
for img in img_list:
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
img = img
rects = detector(img) # detect the face region
# print(rects)
for rect in rects:
bounding_boxes.append(rects)
# (x, y, w, h) = rect_to_bb(rect)
# face_area = cv2.resize(img[y:y + h - 15, x:x + w - 15], (img_rows, img_cols))
# cv2.imshow('dummy', face_area)
# cv2.waitKey()
land_marks = predictor(img, rect) # detect the landmarks in the face
land_marks = shape_to_np(land_marks) # convert the landmarks in tuples of x and y
(rEstart, rEend) = FACIAL_LANDMARKS_IDXS['right_eye'] # get the landmark of right eye
right_Eye_pts = land_marks[rEstart:rEend] # get the right eye points and arrange them
right_Eye_translate.append(right_Eye_pts) # append the right eye points in the array
# print (np.asarray(right_Eye_translate).shape)
if np.asarray(right_Eye_translate).shape[0] == 2: # proceed only when eyes were found in both images
# print right_Eye_translate
# finding the distance between eyes location and adjusting the translation
(im1RECX, im1RecY) = (right_Eye_translate[0][0][0], right_Eye_translate[0][0][1]) # select the right most point
(im2RECX, im2RECY) = (right_Eye_translate[1][0][0], right_Eye_translate[1][0][1]) # select the right most point
# compute the Euclidean distance
distx = im1RECX - im2RECX
disty = im1RecY - im2RECY
M = np.float32([[1, 0, distx], [0, 1, disty]]) # translation matrix
# print right_Eye_translate[1][2]
# pts1 = np.float32([right_Eye_translate[1][0], right_Eye_translate[1][1], right_Eye_translate[1][2]])
# pts2 = np.float32([right_Eye_translate[0][0], right_Eye_translate[0][1], right_Eye_translate[0][2]])
# M_1 = cv2.getAffineTransform(pts1, pts2)
rows, cols,dims = img_list[1].shape
frame1 = (img_list[1])
# translate the left image so that the face can overlap the right image
dst = cv2.warpAffine(frame1, M, (cols, rows)) # do an affine transform
# dst = cv2.warpAffine(dst, M_1, (cols, rows))
# -------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------
# Just for testing purpose. You can comment this portion if you don't want to do test
# frame_gray1 = dst
# rects = detector(frame_gray1)
#
# for rect in rects:
# land_marks = predictor(frame_gray1, rect)
# land_marks = shape_to_np(land_marks)
# (rEstart, rEend) = FACIAL_LANDMARKS_IDXS['right_eye']
# right_Eye_pts = land_marks[rEstart:rEend]
# print right_Eye_pts
#
# # updated right eye point after translation of second camera image by dstx and disty
# (x, y) = right_Eye_pts[0]
# cv2.circle(frame_r, (x, y), 4, (0, 0, 255), -1)
#
# # original right eye point in the first camera image
# (x1, y1) = right_Eye_translate[0][0]
# cv2.circle(frame_r, (x1, y1), 4, (0, 255, 0), -1)
# # original right eye point in the second camera shown in the first camera image
# (x2, y2) = right_Eye_translate[1][0]
# cv2.circle(frame_r, (x2, y2), 4, (255, 0, 0), -1)
#
# vis = np.concatenate((frame_r, dst, frame_r, frame_l), axis=1)
# cv2.imshow('output', vis)
# cv2.waitKey(1)
# ---------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------
# # For testing purpose
# tensor1 = np.zeros((frame_r.shape[0], frame_r.shape[1], 2*frame_r.shape[2]))
# tensor2 = np.zeros((frame_l.shape[0], frame_l.shape[1], 2*frame_l.shape[2]))
#
# tensor1[:, :, 0:3] = cv2.cvtColor(frame_r, cv2.COLOR_BGR2RGB)
# tensor1[:, :, 3:6] = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
#
# tensor2[:, :, 0:3] = cv2.cvtColor(frame_r, cv2.COLOR_BGR2RGB)
# tensor2[:, :, 3:6] = cv2.cvtColor(frame_l, cv2.COLOR_BGR2RGB)
#
# plt.figure(1)
# plt.imshow(tensor1[:, :, 0:6:2])
# plt.figure(2)
# plt.imshow(tensor2[:, :, 0:6:2])
# plt.show()
# Extract the face area after translation
translated_images = []
image_translated = [frame_r, dst]
# cv2.imshow("window", dst)
# cv2.waitKey()
rect = 0
for i in image_translated:
# rects = detector(i)
if len(bounding_boxes) != 2:
print("The rectangle is empty")
break
else:
for rect in bounding_boxes[0]:
(x, y, w, h) = rect_to_bb(rect)
if x < 0 or y < 0 or w < 0 or h < 0:
break
else:
face_area = cv2.resize(i[y:y+h, x:x+w], (img_rows, img_cols))
translated_images.append(face_area)
if len(translated_images) == 2:
tensor_translated = np.uint8(np.zeros((face_area.shape[0], face_area.shape[1], 2*face_area.shape[2])))
tensor_translated[:, :, 0:3] = translated_images[0]
tensor_translated[:, :, 3:6] = translated_images[1]
# testing
plt.figure(3)
cv2.imshow('window', cv2.resize(tensor_translated[:, :, 6:0:-2],(255,255)))
# cv2.waitKey(1)
# plt.show()
s = 1
return tensor_translated, s, bounding_boxes
else:
s = 0
rect = 0
return [], s, rect
else:
s = 0
rect = 0
return [], s, rect
def facial_tensor_homography(img_list, img_rows, img_cols):
tensor1 = []
rect_st = []
s = 0
for i in img_list:
# detect the face region
rects = detector(i)
# only accommodate non-empty bounding boxes
if rects:
rect_st.append(rects)
# since we have two images, we need two bounding boxes, one for each image!
if len(rect_st) != 2:
print("The rectangle is empty")
return tensor1, s
# break
else:
cont = 0
for i in img_list:
rect1 = []
for z in rect_st[cont]:
rect1.append(z)
print(z)
# both branches of the original if/else were identical; always use the first box
(x, y, w, h) = rect_to_bb(rect1[0])
if x < 0 or y < 0 or w < 0 or h < 0:
print("The rectangle is empty")
s = 0
return tensor1, s
else:
# if len(rect_st)
cont += 1
face_area = cv2.resize(i[y:y + h, x:x + w], (img_rows, img_cols))
tensor1.append(face_area)
if len(tensor1) == 2:
reg_img = np.concatenate((tensor1[0], tensor1[1]), axis=-1)
cv2.imshow('registered', reg_img[:, :, 6:0:-2])
# cv2.waitKey(1)
s = 1
return reg_img, s
else:
s = 0
return tensor1, s
def check_face(frame_r, frame_l, img_rows, img_cols):
img_list = [frame_r, frame_l]
right_Eye_translate = []
for img in img_list:
# if len(img.shape) == 3:
# frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# else:
# frame_gray = img
rects = detector(img,1) # detect the face region
for rect in rects:
land_marks = predictor(img, rect) # detect the landmarks in the face
land_marks = shape_to_np(land_marks) # convert the landmarks in tuples of x and y
(rEstart, rEend) = FACIAL_LANDMARKS_IDXS['right_eye'] # get the landmark of right eye
right_Eye_pts = land_marks[rEstart:rEend] # get the right eye points and arrange them
right_Eye_translate.append(right_Eye_pts) # append the right eye points in the array
# print np.asarray(right_Eye_translate).shape
s = 0
if np.asarray(right_Eye_translate).shape[0] == 2: # proceed only when eyes were found in both images
# print right_Eye_translate
# finding the distance between eyes location and adjusting the translation
(im1RECX, im1RecY) = (right_Eye_translate[0][0][0], right_Eye_translate[0][0][1]) # select the right most point
(im2RECX, im2RECY) = (right_Eye_translate[1][0][0], right_Eye_translate[1][0][1]) # select the right most point
# compute the Euclidean distance
distx = im1RECX - im2RECX
disty = im1RecY - im2RECY
M = np.float32([[1, 0, distx], [0, 1, disty]]) # translation matrix
# print right_Eye_translate[1][2]
# pts1 = np.float32([right_Eye_translate[1][0], right_Eye_translate[1][1], right_Eye_translate[1][2]])
# pts2 = np.float32([right_Eye_translate[0][0], right_Eye_translate[0][1], right_Eye_translate[0][2]])
# M_1 = cv2.getAffineTransform(pts1, pts2)
rows, cols, dims = img_list[1].shape
frame1 = (img_list[1])
# translate the left image so that the face can overlap the right image
dst = cv2.warpAffine(frame1, M, (cols, rows)) # do an affine transform
# dst = cv2.warpAffine(dst, M_1, (cols, rows))
# -------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------
# Just for testing purpose. You can comment this portion if you don't want to do test
# frame_gray1 = dst
# rects = detector(img)
#
# for rect in rects:
# land_marks = predictor(frame_gray1, rect)
# land_marks = shape_to_np(land_marks)
# (rEstart, rEend) = FACIAL_LANDMARKS_IDXS['right_eye']
# right_Eye_pts = land_marks[rEstart:rEend]
# print (right_Eye_pts)
#
# # updated right eye point after translation of second camera image by dstx and disty
# (x, y) = right_Eye_pts[0]
# cv2.circle(frame_r, (x, y), 4, (0, 0, 255), -1)
#
# # original right eye point in the first camera image
# (x1, y1) = right_Eye_translate[0][0]
# cv2.circle(frame_r, (x1, y1), 4, (0, 255, 0), -1)
# # original right eye point in the second camera shown in the first camera image
# (x2, y2) = right_Eye_translate[1][0]
# cv2.circle(frame_r, (x2, y2), 4, (255, 0, 0), -1)
#
# vis = np.concatenate((frame_r, dst, frame_r, frame_l), axis=1)
# cv2.imshow('output', vis)
# cv2.waitKey(1)
# ---------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------
# # For testing purpose
# tensor1 = np.zeros((frame_r.shape[0], frame_r.shape[1], 2*frame_r.shape[2])).astype(np.uint8)
# tensor2 = np.zeros((frame_l.shape[0], frame_l.shape[1], 2*frame_l.shape[2])).astype(np.uint8)
#
# tensor1[:, :, 0:3] = cv2.cvtColor(frame_r, cv2.COLOR_BGR2RGB)
# tensor1[:, :, 3:6] = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
#
# tensor2[:, :, 0:3] = cv2.cvtColor(frame_r, cv2.COLOR_BGR2RGB)
# tensor2[:, :, 3:6] = cv2.cvtColor(frame_l, cv2.COLOR_BGR2RGB)
#
# plt.figure(1)
# plt.imshow(tensor1[:, :, 0:6:2])
# plt.figure(2)
# plt.imshow(tensor2[:, :, 0:6:2])
# plt.show()
# Extract the face area after translation
translated_images = []
image_translated = [frame_r, dst]
# cv2.imshow("window", dst)
# cv2.waitKey()
for i in image_translated:
# rects = detector(cv2.cvtColor(i, cv2.COLOR_BGR2GRAY), 1)
rects = detector(i,1)
if not rects:
print("The rectangle is empty")
break
for rect in rects:
(x, y, w, h) = rect_to_bb(rect)
face_area = cv2.resize(i[y:y + h, x:x + w], (img_rows, img_cols))
translated_images.append(face_area)
# print (np.asarray(translated_images).shape)
if len(translated_images) == 2:
tensor_translated = np.uint8(np.zeros((face_area.shape[0], face_area.shape[1], 2 * face_area.shape[2])))
import numpy as np
import datetime
import dateutil.parser
import cdflib
import matplotlib.pyplot as plt
import matplotlib as mpl
import string
#---Time conversion. All times are in UTC.
# internally save time as (whole ut sec, fractional ut sec up to pico sec accuracy).
# t_epoch, accurate to 1 milli second, in milli sec.
# t_epoch16, accurate to 1 pico second, real part in sec, imaginary part in pico sec.
# t_tt2000, accurate to 1 nano second, in nano sec, contains leap second.
# t_ut, accurate to 1 milli second, in sec.
# datetime, accurate to 1 micro second, in object.
class ut(object):
t0_datetime = datetime.datetime(1970,1,1)
secofday = 86400.
secofday1 = 1/secofday
t0_jd = 2440587.5 # in day, 0 of Julian day.
t0_mjd = 40587. # in day, 0 of modified Julian day.
t0_sdt = 50716800. # in sec, 0 of times in SDT.
# real part is the ut second, imaginary part is the fractional second.
def __init__(self, times, format=''):
# input can be an array of string or number, or a single string or number.
if type(times) == str: # input is a string, change it into a list.
t_input = [times]
elif not isinstance(times, (list,np.ndarray)): # input is not a list or nparray.
t_input = [times]
else: t_input = times
# complex 128 includes 2 float 64, which have >14 significant digits.
self.t_ep16 = np.zeros(len(t_input),dtype=np.complex128)
# input is a string, parse each.
if type(t_input[0]) == str:
t_ut = np.empty(len(t_input))
for i, ttime in enumerate(t_input):
if format == '': # no format code, use parser.
t_datetime = dateutil.parser.parse(ttime)
else: # use explicit format codes.
t_datetime = datetime.datetime.strptime(ttime, format)
t_ut[i] = (t_datetime-self.t0_datetime).total_seconds()
self.t_ep16.imag = np.remainder(t_ut, 1)
self.t_ep16.real = t_ut - self.t_ep16.imag
elif type(t_input[0]) == datetime.datetime:
t_ut = np.empty(len(t_input))
for i, ttime in enumerate(t_input):
t_ut[i] = (ttime-self.t0_datetime).total_seconds()
self.t_ep16.imag = np.remainder(t_ut, 1)
self.t_ep16.real = t_ut - self.t_ep16.imag
else:
# input should be number, then use numpy array.
# datetime object is used in some conversions, it's accurate to milli sec.
if type(t_input) == list:
t_input = np.array(t_input)
if format == '': format = 'ut'
if format in ['ut','utc','unix']: # ut in sec, accurate to milli sec.
self.t_ep16.imag = np.remainder(t_input, 1)
self.t_ep16.real = t_input - self.t_ep16.imag
elif format == 'epoch': # epoch is in msec, accurate to milli sec.
t0 = cdflib.cdfepoch.breakdown_epoch(t_input[0])
t_datetime = datetime.datetime(*t0)
t0 = (t_datetime-self.t0_datetime).total_seconds()
t_ut = t0+(t_input-t_input[0])*1e-3
self.t_ep16.imag = np.remainder(t_ut, 1)
self.t_ep16.real = t_ut-self.t_ep16.imag
elif format == 'epoch16': # epoch16 is in (sec, pico sec), accurate to pico sec.
# get the ut sec for the first time.
t_list = cdflib.cdfepoch.breakdown_epoch16(t_input[0])
t_datetime = datetime.datetime(*t_list[0:6])
t0 = (t_datetime-self.t0_datetime).total_seconds() # ut sec, in sec.
# decompose ut sec and fractional sec.
self.t_ep16.real = t0+t_input.real-t_input.real[0]
self.t_ep16.imag = t_input.imag*1e-12 # convert pico sec to sec.
elif format == 'tt2000': # tt2000 is in nano sec, accurate to nano sec.
# get the ut sec for the first time.
t_list = cdflib.cdfepoch.breakdown_tt2000(t_input[0])
t_datetime = datetime.datetime(*t_list[0:6])
t0_ut = (t_datetime-self.t0_datetime).total_seconds() # ut sec, in sec.
# decompose input time into sec and fractional sec in sec.
t0_t2000 = np.longlong(t_input[0]-t_list[6]*1e6-t_list[7]*1e3-t_list[8]) # the first time without fractional sec.
t_dt = np.int64(t_input-t0_t2000) # nano sec relative to the first time.
dt_dt = np.mod(t_dt, np.int64(1e9)) # the part of fractional sec in nano sec.
self.t_ep16.real = t0_ut+(t_dt-dt_dt)*1e-9
self.t_ep16.imag = dt_dt*1e-9
elif format == 'mjd': # modified Julian day, in day.
t_ut = (t_input-self.t0_mjd)*self.secofday
self.t_ep16.imag = np.remainder(t_ut, 1)
self.t_ep16.real = t_ut-self.t_ep16.imag
elif format == 'jd': # Julian day, in day.
t_ut = (t_input-self.t0_jd)*self.secofday
self.t_ep16.imag = np.remainder(t_ut, 1)
self.t_ep16.real = t_ut-self.t_ep16.imag
elif format == 'sdt': # times in SDT, in sec, has a different zero time.
t_ut = t_input-self.t0_sdt
self.t_ep16.imag = np.remainder(t_ut, 1)
self.t_ep16.real = t_ut-self.t_ep16.imag
def __getitem__(self, item):
return self.t_ep16[item].real+self.t_ep16[item].imag
def __eq__(self, other):
if type(other) == ut: return self.t_ep16 == other.t_ep16
elif np.iscomplex(other): return self.t_ep16 == other
else: return self.ut() == other
def __lt__(self, other):
if type(other) == ut: return self.t_ep16 < other.t_ep16
elif np.iscomplex(other): return self.t_ep16 < other
else: return self.ut() < other
def __le__(self, other):
if type(other) == ut: return self.t_ep16 <= other.t_ep16
elif np.iscomplex(other): return self.t_ep16 <= other
else: return self.ut() <= other
def __gt__(self, other):
if type(other) == ut: return self.t_ep16 > other.t_ep16
elif np.iscomplex(other): return self.t_ep16 > other
else: return self.ut() > other
def __ge__(self, other):
if type(other) == ut: return self.t_ep16 >= other.t_ep16
elif np.iscomplex(other): return self.t_ep16 >= other
else: return self.ut() >= other
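# Hedged usage sketch (values are illustrative): every input format is
# normalized to t_ep16 = whole ut seconds (real part) + fractional seconds (imag part).
# t = ut(['2020-01-01 00:00:00.5'])
# t[0]                       # unix seconds: 1577836800.5
# t == ut([1577836800.5])    # elementwise comparison against another ut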
import numpy as np
from PIL import Image
import os
from skimage.transform import resize
from scipy.ndimage.filters import convolve
import argparse
from sklearn.metrics import mean_squared_error as compare_mse
from sklearn.metrics import mean_absolute_error as compare_mae
from skimage.measure import compare_psnr
from skimage.measure import compare_ssim
compare_mae5 = lambda a, b: compare_mae(a, b) * 1e5
compare_rmse = lambda a, b: np.sqrt(compare_mse(a, b))
compare_rmse5 = lambda a, b: np.sqrt(compare_mse(a, b)) * 1e5
resize2 = lambda x, y: resize(x, output_shape=y, preserve_range=True, anti_aliasing=True, mode='constant')
parser = argparse.ArgumentParser()
parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
parser.add_argument("--version", type=str, default='testcube3d_latest', help="Version of model")
args = parser.parse_args()
path = os.path.join(args.result_dir, args.version)
output_file = os.path.join(args.result_dir, args.version, 'scores_bi.txt')
with open(output_file, 'w') as f:
print('Loading data paths...')
# Load data
real_As = sorted([os.path.join(path, x) for x in os.listdir(path) if 'real_A' in x])
fake_Bs = sorted([os.path.join(path, x) for x in os.listdir(path) if 'fake_B' in x])
real_Bs = sorted([os.path.join(path, x) for x in os.listdir(path) if 'real_B' in x])
fake_As = sorted([os.path.join(path, x) for x in os.listdir(path) if 'fake_A' in x])
is_backward = len(fake_As) > 0
f.write('Model: {}\tVersion: {}\tFwrd: {} \tBwrd: {}\n'.format(args.result_dir, args.version, len(fake_Bs), len(fake_As)))
# Forward
rmse_s, mae_s, psnr_s, ssim_s = [], [], [], []
for fake_B_path, real_B_path in zip(fake_Bs, real_Bs):
print('Loading real_B:', real_B_path)
im_true = np.load(real_B_path).flatten()
print('Loading fake_B:', fake_B_path)
im_pred = np.load(fake_B_path).flatten()
print('comparing normal...')
rmse = compare_rmse(im_true, im_pred)
rmse_s.append(rmse)
print('rmse: ', rmse)
mae = compare_mae(im_true, im_pred)
mae_s.append(mae)
print('mae: ', mae)
psnr = compare_psnr(im_true, im_pred)
psnr_s.append(psnr)
print('psnr:', psnr)
ssim = compare_ssim(im_true, im_pred)
ssim_s.append(ssim)
print('ssim:', ssim)
del im_pred
del im_true
print('\nForward Real Results\n')
print('Total RMSE: {:.4f}\t{:.4f}\n'.format(np.mean(rmse_s), np.std(rmse_s)))
print('Total MAE: {:.4f}\t{:.4f}\n'.format(np.mean(mae_s), np.std(mae_s)))
print('Total PSNR: {:.4f}\t{:.4f}\n'.format(np.mean(psnr_s), np.std(psnr_s)))
print('Total SSIM: {:.4f}\t{:.4f}\n'.format(np.mean(ssim_s), np.std(ssim_s)))
'''
This is a follow up of https://letianzj.github.io/portfolio-management-one.html
It backtests four portfolios: GMV, tangent, maximum diversification and risk parity
and compare them with equally-weighted portfolio
'''
import os
import numpy as np
import pandas as pd
import pytz
from datetime import datetime, timezone
import quanttrader as qt
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import empyrical as ep
import pyfolio as pf
# set browser full width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# ------------------ help functions -------------------------------- #
def minimum_vol_obj(wo, cov):
w = wo.reshape(-1, 1)
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))
return sig_p
import matplotlib.pyplot
import numpy
def errorize(observation, error):
"""
Adds error to the observation.
:param observation: original observation
:param error: error probability
:return: errored observation
"""
errored = observation
for i in range(len(errored)):
if numpy.random.rand() < error:
errored[i] = (errored[i] + 1) % 2
return errored
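# Hedged example (not in the original module): with error=0.25 each entry of a
# binary observation such as [1, 0, 1, 1] is flipped independently with
# probability 0.25, so one possible output is [1, 1, 1, 0].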
def sensor_model(location, states, moves, error):
"""
Gets the sensor model.
:param location: current location
:param states: possible states
:param moves: possible moves
:param error: error probability
:return: sensor matrix (N x N)
"""
N = len(states)
matrix = numpy.zeros((N, N))
actual_observation = errorize(sense(location, states, moves), error)
for i in range(N):
true_observation = sense(states[i], states, moves)
d = 0
for true_direction, actual_direction in zip(true_observation, actual_observation):
if true_direction != actual_direction:
d = d + 1
matrix[i][i] = ((1 - error) ** (len(true_observation) - d)) * (error ** d)
return matrix
def transition_model(states, moves):
"""
Gets the transitional model.
:param states: possible states
:param moves: possible moves
:return: transition matrix (N x N)
"""
N = len(states)
matrix = numpy.ndarray((N, N))
for i in range(N):
observation = sense(states[i], states, moves)
n_zeroes = len(observation) - sum(observation)
for j in range(N):
matrix[i][j] = 0.0 if n_zeroes == 0 else 1.0 / n_zeroes
return matrix
def viterbi(locations, states, moves, error):
"""
Viterbi algorithm.
:param locations: path locations
:param states: possible states
:param moves: possible moves
:param error: error probability
:return: best possible states
"""
N = len(states)
T = len(locations)
M = numpy.ndarray((N, T + 1))
M_star = numpy.ndarray((N, T + 1), int) # numpy.int was removed in NumPy 1.24
transition_matrix = transition_model(states, moves)
initial_transition = numpy.full((N,), 1.0 / N)
for i in range(N):
M[i][1] = initial_transition[i]
M_star[i][1] = 0
for t in range(2, T + 1):
sensor_matrix = sensor_model(locations[t - 1], states, moves, error)
for i in range(N):
max_finding = []
for j in range(N):
max_finding.append(M[j][t - 1] * transition_matrix[j][i] * sensor_matrix[i][i])
M[i][t] = max(max_finding)
M_star[i][t] = numpy.argmax(max_finding)
best_path = numpy.ndarray((T + 1,), int)
max_finding = []
for i in range(N):
max_finding.append(M[i][T])
best_path[T] = numpy.argmax(max_finding)
for i in range(T, 1, -1):
best_path[i - 1] = M_star[best_path[i]][i]
return [states[index] for index in best_path[1:]]
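# Hedged usage sketch (assumes the `states`, `moves` grid and the sense()
# helper defined elsewhere in this module):
# locations = [(2, 3), (2, 4), (3, 4)]   # hypothetical true path
# decoded = viterbi(locations, states, moves, error=0.1)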
def question_two(locations, states, moves, error):
"""
Solution to question two.
:param locations: path locations
:param states: possible states
:param moves: possible moves
:param error: error probability
:return: path accuracies at every time
"""
viterbi_states = viterbi(locations, states, moves, error)
accuracies = []
accurate_count = 0
for index, (location, viterbi_state) in enumerate(zip(locations, viterbi_states)):
if location == viterbi_state:
accurate_count = accurate_count + 1
accuracies.append(accurate_count / (index + 1))
return accuracies
def manhattan(location1, location2):
"""
Manhattan distance.
:param location1: x
:param location2: y
:return: distance
"""
return abs(location1[0] - location2[0]) + abs(location1[1] - location2[1])
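# Hedged example: manhattan((0, 0), (2, 3)) == 5.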
def forward(locations, states, moves, error):
"""
Forward algorithm.
:param locations: path locations
:param states: possible states
:param moves: possible moves
:param error: error probability
:return: best possible states
"""
N = len(states)
forward_states = []
transition_matrix = transition_model(states, moves)
forward_variable = numpy.full((N,), 1.0 / N)
#!/usr/bin/python
import math
import numpy as np
import scipy
from scipy.signal import argrelextrema, find_peaks
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import cm
import os
import sys
import re
import sympy
import cmath
import matplotlib.patches as patches
from matplotlib import colors as m2colors
cwd = os.getcwd()
sns.set(style="ticks", font_scale=1.5)
mcolors = dict(m2colors.BASE_COLORS, **m2colors.CSS4_COLORS)
def static_alphaG(p,t):
return p['a0'] * np.exp(t/p['tau_SG'])
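# Hedged example (parameter values are illustrative):
# static_alphaG({'a0': 1.0, 'tau_SG': 10.0}, 5.0) == np.exp(0.5), about 1.6487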
import numpy as np
class stress(np.ndarray):
# Inherits Numpy array class
#
def __new__(cls, a):
# Create Array Object
obj = np.asarray(a)
import numpy as np
import os
import torch
import torchvision
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, random_split
from torchvision import transforms
import PIL.Image as Image
from sklearn.datasets import load_boston
## Custom PyTorch Dataset Class wrapper
class CustomDataset(Dataset):
def __init__(self, data, target, device=None, transform=None):
self.transform = transform
if device is not None:
# Push the entire data to given device, eg: cuda:0
self.data = data.float().to(device)
self.targets = target.long().to(device)
else:
self.data = data.float()
self.targets = target.long()
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_data = self.data[idx]
label = self.targets[idx]
if self.transform is not None:
sample_data = self.transform(sample_data)
return (sample_data, label) # .astype('float32')
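# Hedged usage sketch (shapes are illustrative, not from the original code):
# X = torch.randn(100, 20)
# y = torch.randint(0, 2, (100,))
# train_set = CustomDataset(X, y, device='cuda:0')  # or device=None to stay on CPU
# sample, label = train_set[0]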
class CustomDataset_WithId(Dataset):
def __init__(self, data, target, transform=None):
self.transform = transform
self.data = data # .astype('float32')
self.targets = target
self.X = self.data
self.Y = self.targets
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_data = self.data[idx]
label = self.targets[idx]
if self.transform is not None:
sample_data = self.transform(sample_data)
return sample_data, label, idx # .astype('float32')
## Utility function to load datasets from libsvm datasets
def csv_file_load(path, dim, save_data=False):
data = []
target = []
with open(path) as fp:
line = fp.readline()
while line:
temp = [i for i in line.strip().split(",")]
target.append(int(float(temp[-1]))) # Class Number. # Not assumed to be in (0, K-1)
temp_data = [0] * dim
count = 0
for i in temp[:-1]:
# ind, val = i.split(':')
temp_data[count] = float(i)
count += 1
data.append(temp_data)
line = fp.readline()
X_data = np.array(data, dtype=np.float32)
Y_label = np.array(target)
if save_data:
# Save the numpy files to the folder where they come from
data_np_path = path + '.data.npy'
target_np_path = path + '.label.npy'
np.save(data_np_path, X_data)
np.save(target_np_path, Y_label)
return (X_data, Y_label)
def libsvm_file_load(path, dim, save_data=False):
data = []
target = []
with open(path) as fp:
line = fp.readline()
while line:
temp = [i for i in line.strip().split(" ")]
target.append(int(float(temp[0]))) # Class Number. # Not assumed to be in (0, K-1)
temp_data = [0] * dim
for i in temp[1:]:
ind, val = i.split(':')
temp_data[int(ind) - 1] = float(val)
data.append(temp_data)
line = fp.readline()
X_data = np.array(data, dtype=np.float32)
Y_label = np.array(target)
if save_data:
# Save the numpy files to the folder where they come from
data_np_path = path + '.data.npy'
target_np_path = path + '.label.npy'
np.save(data_np_path, X_data)
np.save(target_np_path, Y_label)
return (X_data, Y_label)
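# Hedged example (path is hypothetical): each libsvm line reads
# "<label> <index>:<value> ...", e.g. "1 3:0.5 7:1.2"; indices are 1-based.
# X, y = libsvm_file_load('data/a9a.libsvm', dim=123)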
def census_load(path, dim, save_data=False):
enum = enumerate(
['Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay',
'Never-worked'])
workclass = dict((j, i) for i, j in enum)
enum = enumerate(
['Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th', '7th-8th',
'12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool'])
education = dict((j, i) for i, j in enum)
enum = enumerate(
['Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed', 'Married-spouse-absent',
'Married-AF-spouse'])
marital_status = dict((j, i) for i, j in enum)
enum = enumerate(['Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty',
'Handlers-cleaners',
'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving', 'Priv-house-serv',
'Protective-serv', 'Armed-Forces'])
occupation = dict((j, i) for i, j in enum)
enum = enumerate(['Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried'])
relationship = dict((j, i) for i, j in enum)
enum = enumerate(['White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black'])
race = dict((j, i) for i, j in enum)
sex = {'Female': 0, 'Male': 1}
enum = enumerate(
['United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany', 'Outlying-US(Guam-USVI-etc)',
'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran', 'Honduras', 'Philippines', 'Italy', 'Poland',
'Jamaica',
'Vietnam', 'Mexico', 'Portugal', 'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador', 'Taiwan',
'Haiti', 'Columbia',
'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 'Yugoslavia', 'El-Salvador', 'Trinadad&Tobago',
'Peru', 'Hong',
'Holand-Netherlands'])
native_country = dict((j, i) for i, j in enum)
data = []
target = []
with open(path) as fp:
line = fp.readline()
while line:
temp = [i.strip() for i in line.strip().split(",")]
if '?' in temp or len(temp) == 1:
line = fp.readline()
continue
if temp[-1].strip() == "<=50K" or temp[-1].strip() == "<=50K.":
target.append(0)
else:
target.append(1)
temp_data = [0] * dim
count = 0
# print(temp)
for i in temp[:-1]:
if count == 1:
temp_data[count] = workclass[i.strip()]
elif count == 3:
temp_data[count] = education[i.strip()]
elif count == 5:
temp_data[count] = marital_status[i.strip()]
elif count == 6:
temp_data[count] = occupation[i.strip()]
elif count == 7:
temp_data[count] = relationship[i.strip()]
elif count == 8:
temp_data[count] = race[i.strip()]
elif count == 9:
temp_data[count] = sex[i.strip()]
elif count == 13:
temp_data[count] = native_country[i.strip()]
else:
temp_data[count] = float(i)
temp_data[count] = float(temp_data[count])
count += 1
data.append(temp_data)
line = fp.readline()
X_data = np.array(data, dtype=np.float32)
Y_label = np.array(target)
if save_data:
# Save the numpy files to the folder where they come from
data_np_path = path + '.data.npy'
target_np_path = path + '.label.npy'
np.save(data_np_path, X_data)
np.save(target_np_path, Y_label)
return (X_data, Y_label)
def create_imbalance(x_trn, y_trn, x_val, y_val, x_tst, y_tst, num_cls, ratio):
np.random.seed(42)
samples_per_class = np.zeros(num_cls)
val_samples_per_class = np.zeros(num_cls)
tst_samples_per_class = np.zeros(num_cls)
for i in range(num_cls):
samples_per_class[i] = len(np.where(y_trn == i)[0])
val_samples_per_class[i] = len(np.where(y_val == i)[0])
tst_samples_per_class[i] = len(np.where(y_tst == i)[0])
min_samples = int(np.min(samples_per_class) * 0.1)
selected_classes = np.random.choice(np.arange(num_cls), size=int(ratio * num_cls), replace=False)
for i in range(num_cls):
if i == 0:
if i in selected_classes:
subset_idxs = np.random.choice(np.where(y_trn == i)[0], size=min_samples, replace=False)
else:
subset_idxs = np.where(y_trn == i)[0]
x_trn_new = x_trn[subset_idxs]
y_trn_new = y_trn[subset_idxs].reshape(-1, 1)
else:
if i in selected_classes:
subset_idxs = np.random.choice(np.where(y_trn == i)[0], size=min_samples, replace=False)
else:
subset_idxs = np.where(y_trn == i)[0]
x_trn_new = np.row_stack((x_trn_new, x_trn[subset_idxs]))
y_trn_new = np.row_stack((y_trn_new, y_trn[subset_idxs].reshape(-1, 1)))
max_samples = int(np.max(val_samples_per_class))
for i in range(num_cls):
y_class = np.where(y_val == i)[0]
if i == 0:
subset_ids = np.random.choice(y_class, size=max_samples - y_class.shape[0], replace=True)
'''Compare different estimators on public datasets
Code modified from https://github.com/tmadl/sklearn-random-bits-forest
'''
import argparse
import os
import pickle as pkl
import time
import warnings
from collections import defaultdict, OrderedDict
from typing import Any, Callable, List, Dict, Tuple
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score, make_scorer
from sklearn.model_selection import KFold, train_test_split, cross_validate
from tqdm import tqdm
from experiments.config.config_general import DATASETS
from experiments.config.util import get_estimators_for_dataset, get_ensembles_for_dataset
from experiments.util import Model, MODEL_COMPARISON_PATH, get_clean_dataset, get_best_accuracy, remove_x_axis_duplicates
warnings.filterwarnings("ignore", message="Bins whose width")
def get_complexity(estimator: BaseEstimator) -> float:
if isinstance(estimator, (RandomForestClassifier, GradientBoostingClassifier)):
complexity = 0
for tree in estimator.estimators_:
if type(tree) is np.ndarray:
tree = tree[0]
complexity += (2 ** tree.get_depth()) * tree.get_depth()
return complexity
else:
return estimator.complexity_
def compute_meta_auc(result_data: pd.DataFrame,
prefix: str = '',
low_complexity_cutoff: int = 30,
max_start_complexity: int = 10) -> Tuple[pd.DataFrame, Tuple[float]]:
# LOW_COMPLEXITY_CUTOFF: complexity score under which a model is considered interpretable
# MAX_START_COMPLEXITY: min complexity of curves included in the AUC-of-AUC comparison must be below this value
# x_column = f'{prefix}_mean_complexity'
x_column = f'mean_complexity'
compute_columns = result_data.columns[result_data.columns.str.contains('mean')]
estimators = np.unique(result_data.index)
xs = np.empty(len(estimators), dtype=object)
ys = xs.copy()
for i, est in enumerate(estimators):
est_result_df = result_data[result_data.index.str.fullmatch(est)]
complexities_unsorted = est_result_df[x_column]
complexity_sort_indices = complexities_unsorted.argsort()
complexities = complexities_unsorted[complexity_sort_indices]
roc_aucs = est_result_df.iloc[complexity_sort_indices][compute_columns]
xs[i] = complexities.values
ys[i] = roc_aucs.values
# filter out curves which start too complex
start_under_10 = list(map(lambda x: min(x) < max_start_complexity, xs))
# find overlapping complexity region for roc-of-roc comparison
meta_auc_lb = max([x[0] for x in xs])
endpts = np.array([x[-1] for x in xs])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 14:15:37 2020
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import sklearn
from sklearn.model_selection import train_test_split, StratifiedKFold
import seaborn as sns
# from tabulate import tabulate
# from imblearn import under_sampling, over_sampling
def normalize(pressure):
"""
Scales each array of the given array of arrays to the range [0, 1]
Only considers values in the same tactile frame
"""
normalized_p = np.copy(pressure)
for i in range(pressure.shape[0]):
min_p = np.min(pressure[i])
normalized_p[i] = (pressure[i] - min_p) / np.max(pressure[i] - min_p)
return normalized_p
def normalize_per_pixel(pressure):
"""
Scales each element of the given array of arrays to the range [0, 1]
Considers values in all tactile frames
"""
normalized_p = np.copy(pressure)
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 14:26:54 2019
@author: Mnemosyne
Functions to compute the features of the song
"""
import os
import shutil
import glob
import sys
import random
import re
import numpy as np
import scipy as sp
import scipy.io.wavfile as wav
from scipy.fftpack import fft, rfft
from scipy.optimize import curve_fit
import scipy.signal as signal
from scipy.stats.mstats import gmean
from sklearn.cluster import KMeans
from pydub import AudioSegment
from pydub import silence
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import axes3d
import matplotlib.colors as colors
from threading import Thread
import librosa.feature
import librosa.effects
from songbird_data_analysis import Song_functions
# Onset and offset of the syllables: to compute duration of syllables and gaps
def cut(rawsong, sr, threshold, min_syl_dur, min_silent_dur, f_cut_min, f_cut_max):
"""
This function is meant to be used on a single recording, create an external loop
to apply on several recordings (see function distribution).
VARIABLES:
- rawsong: the wav file a song
- sr: sampling rate
OUTPUT:
- onset and offset of each syllable of the song
So for syllable 1 of a song, its onset is onsets[0] and its offset is offsets[0].
To get that segment of the spectrogram, you'd take spect[:,onsets[0]:offsets[0]]
"""
# parameters that might be adjusted depending on the bird
rawsong = rawsong.astype(float)
rawsong = rawsong.flatten()
amp = Song_functions.smooth_data(rawsong,sr,freq_cutoffs=(f_cut_min, f_cut_max))
(onsets, offsets) = Song_functions.segment_song(amp,segment_params={'threshold': threshold, 'min_syl_dur': min_syl_dur, 'min_silent_dur': min_silent_dur},samp_freq=sr) # Detects syllables according to the threshold you set
return amp, onsets, offsets
def test_features(songfile, args):
"""
A function to tune the parameter depending on the dataset and test the feature extraction
INPUT:
One recording.
OUTPUT
- plot of the spectrogram, onset & offset and amplitude of the selected syllables
- plot the pitches
- plot the coupling of the features two by two
"""
# read the data
sr, samples = wav.read(songfile[0])
y, sr = librosa.load(songfile[0], sr=16000)
# determine onset and offset of the syllables for this song
amp, onsets, offsets = cut(samples, sr, args.threshold, args.min_syl_dur, args.min_silent_dur, args.f_cut_min, args.f_cut_max)
# Make output directory
aux_output_dir = os.path.join(args.data_dir,args.output_dir)
if not os.path.isdir(aux_output_dir):
os.makedirs(aux_output_dir)
os.chdir(aux_output_dir)
# Spectrogram with librosa
X = librosa.stft(y, n_fft=args.N, hop_length=args.H, win_length=args.N, window='hann', pad_mode='constant', center=True)
Y = np.log(1 + 100 * np.abs(X) ** 2)
T_coef = np.arange(X.shape[1]) * args.H / sr
K = args.N // 2
F_coef = np.arange(K + 1) * sr / args.N
# Plot
noverlap = args.nperseg - args.overlap
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)
# Plots spectrogram
#(f,t,spect)=sp.signal.spectrogram(samples, sr, args.window, args.nperseg, noverlap, mode='complex')
#ax1.imshow(10*np.log10(np.square(abs(spect))), origin="lower", aspect="auto", interpolation="none", extent=[0, max(t)*1000, min(f), max(f)], cmap = 'inferno')
extent = [T_coef[0], T_coef[-1], F_coef[0], 8000]
ax1.imshow(Y, cmap=args.color, aspect='auto', origin='lower', extent=extent, norm=colors.PowerNorm(gamma=0.2))
ax1.set_ylabel('Frequency (Hz)')
# Plots song signal amplitude
x_amp=np.arange(len(amp))
ax2.plot(x_amp/sr*1000,samples,color='grey')
for i in range(0,len(onsets)):
ax2.axvline(x=onsets[i]/sr*1000,color='olivedrab',linestyle='dashed')
ax2.axvline(x=offsets[i]/sr*1000,color='darkslategrey',linestyle='dashed')
ax2.set_ylabel('Amplitude (V)')
# Plot smoothed amplitude of the song as per spectrogram index
ax3.plot(x_amp/sr*1000, amp,color='grey')
for i in range(0,len(onsets)):
ax3.axvline(x=onsets[i]/sr*1000,color='olivedrab',linestyle='dashed')
ax3.axvline(x=offsets[i]/sr*1000,color='darkslategrey',linestyle='dashed')
ax3.axhline(y=args.threshold,color='black',label='Threshold')
ax3.legend()
ax3.set_ylabel('Amplitude (V)')
ax3.set_xlabel('Time (ms)')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax1.tick_params(axis='x', labelbottom=False, bottom=False)
ax2.tick_params(axis='x', labelbottom=False, bottom=False)
ax3.tick_params(axis='x', labelbottom=True, bottom=True)
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Test_selection_0' + '.' + args.format)
# Duration, spectral flatness, mean pitch
dur_syll = np.zeros((np.size(onsets),))
dur_gap = np.zeros((np.size(onsets),))
wiener = np.zeros((np.size(onsets),))
mean_pitch = np.zeros((np.size(onsets),))
max_pitch = np.zeros((np.size(onsets),))
min_pitch = np.zeros((np.size(onsets),))
direction_pitch = np.zeros((np.size(onsets),))
for j in range(0, np.size(onsets)):
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
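# Hedged examples: splitpatient('pt1sz2') -> ('pt1', 'sz2');
# splitpatient('id001ac_seiz1') -> ('id001ac', 'seiz1').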
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
        included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
                                           np.arange(22, 23), np.arange(29, 37),
                                           np.arange(38, 40), np.arange(42, 64),
                                           np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
        included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
                                           np.arange(36, 37), np.arange(38, 40),
                                           np.arange(42, 74), np.arange(75, 81),
                                           np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
        resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12',
                           'RG13', 'RG14', 'RG15',
                           'RG21', 'RG22', 'RG23', 'RG29', 'RG30',
                           'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
        included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
                                           np.arange(42, 66), np.arange(68, 74),
                                           np.arange(76, 77), np.arange(78, 94),
                                           np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
        included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
                                           np.arange(11, 17), np.arange(18, 19),
                                           np.arange(20, 37), np.arange(40, 42),
                                           np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
        included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
                                           np.arange(31, 36), np.arange(40, 42),
                                           np.arange(44, 47), np.arange(48, 66),
                                           np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
                           'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
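# The bracketed comments in these functions are MATLAB-style 1-based inclusive
# ranges carried over from the original annotations; each maps to a 0-based,
# half-open np.arange, e.g. "21:25" -> np.arange(20, 25), i.e. channels 20..24.
# Illustrative usage (note that seiz_id is ignored by the NIH branches shown):
# inds, onset, outcome = returnnihindices('pt1', seiz_id=None)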
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
        included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8),
                                           np.arange(10, 13), np.arange(16, 19),
                                           np.arange(21, 26), np.arange(31, 32),
                                           np.arange(33, 35), np.arange(36, 37),
                                           np.arange(41, 42), np.arange(49, 55),
                                           np.arange(57, 58), np.arange(61, 65),
                                           np.arange(69, 72), np.arange(76, 81),
                                           np.arange(83, 97), np.arange(99, 102),
                                           np.arange(104, 107), np.arange(109, 114),
                                           np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
            included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16),
                                               np.arange(21, 28), np.arange(33, 36),
                                               np.arange(39, 40), np.arange(42, 44),
                                               np.arange(46, 50), np.arange(56, 58),
                                               np.arange(62, 65), np.arange(66, 68),
                                               np.arange(69, 75), np.arange(76, 83),
                                               np.arange(85, 89), np.arange(96, 103),
                                               np.arange(106, 109), np.arange(111, 115),
                                               np.arange(116, 117), np.arange(119, 123),
                                               np.arange(126, 127), np.arange(130, 134),
                                               np.arange(136, 137), np.arange(138, 144),
                                               np.arange(146, 153)))
if seiz_id == 'ictal2':
            included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
                                               np.arange(20, 33), np.arange(34, 37),
                                               np.arange(38, 40), np.arange(42, 98),
                                               np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7),
                                           np.arange(8, 9), np.arange(10, 12),
                                           np.arange(14, 18), np.arange(20, 28),
                                           np.arange(29, 34), np.arange(46, 47),
                                           np.arange(49, 62), np.arange(63, 67),
                                           np.arange(69, 73), np.arange(78, 87),
                                           np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
        included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
                                           np.arange(35, 68), np.arange(76, 163)))
import numpy as np
def energy(X,W_fixed):
"""
Input X is a vector (N by 1), and the shape of W_fixed is N by N.\n
Return a 2D array E with shape (1,1)
"""
return (-1/2) * np.dot(np.dot(np.transpose(X),W_fixed),X)
def f(W,X):
"""
W : W_fixed after storaging phase with shape (N,N) \n
X : testing pattern X with shape (N,1) \n
Return X_new with shape (N,1)
"""
    v = np.dot(W, X)
    for i, value in enumerate(v[:, 0]):
        if value > 0:
            v[i, 0] = 1
        elif value < 0:
            v[i, 0] = -1
        else:
            v[i, 0] = X[i, 0]  # keep the previous state when the input is exactly zero
    return v
X0 = np.array([ 1,-1, 1,-1, 1,-1])
X2 = np.array([-1, 1,-1, 1,-1, 1])
X4 = np.array([ 1, 1, 1, 1, 1, 1])
X6 = np.array([-1,-1,-1,-1,-1,-1])
X0 = X0.reshape(1,X0.size)
X2 = X2.reshape(1,X2.size)
X4 = X4.reshape(1,X4.size)
X6 = X6.reshape(1,X6.size)
X = np.array([X0, X2, X4, X6])
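# Illustrative continuation (an assumption; the original snippet ends with the
# pattern definitions above): store the four patterns with a Hebbian
# outer-product rule, then recall one and compute its energy.
W_fixed = np.zeros((6, 6))
for p in X:
    W_fixed += np.dot(p.T, p)  # p has shape (1, 6), so p.T @ p is (6, 6)
np.fill_diagonal(W_fixed, 0)   # Hopfield networks have no self-connections
recalled = f(W_fixed, X0.T)    # f() expects a column vector of shape (N, 1)
print(energy(recalled, W_fixed))  # energy of the recalled state, shape (1, 1)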
import numpy as np
import time
from sklearn.utils import check_random_state
from scipy.special import expit
def sigmoid(x):
return expit(np.clip(x, -30, 30))
class RestrictedBoltzmannMachine:
def __init__(self, n_hidden_variables, learning_rate=0.1, batch_size=20,
n_epochs=15, mu=0.5, pcd_steps=1, random_state=None, verbose=0):
self.n_hidden = n_hidden_variables
self.random_state = random_state
self.n_epochs = n_epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
self.mu = mu
self.pcd_steps = pcd_steps
self.verbose = verbose
def fit(self, X):
self.random_state_ = check_random_state(self.random_state)
        X = np.asarray(X, dtype=bool)  # np.bool is removed in modern NumPy
self.n_visible_ = X.shape[1]
self.init_parameters()
self.stochastic_gradient_descent(X)
return self
def init_parameters(self):
sdev = 1.0 / np.sqrt(self.n_visible_)
dim = (self.n_hidden, self.n_visible_)
self.W_ = self.random_state_.normal(0, sdev, size=dim)
self.b_ = np.zeros(self.n_visible_)
self.c_ = np.zeros(self.n_hidden)
self.VW_ = np.zeros(self.W_.shape)
self.Vb_ = np.zeros(self.b_.shape)
self.Vc_ = np.zeros(self.c_.shape)
self.V = None
def stochastic_gradient_descent(self, X):
bs = self.batch_size
now = time.time()
for epoch in range(self.n_epochs):
s = self.random_state_.permutation(X.shape[0])
X_s = X[s]
for i in range(0, X_s.shape[0], bs):
self.gradient_descent_step(X_s[i: i + bs])
if self.verbose > 2:
print('Epoch {0} ({1:.1f}%).'.format(epoch + 1,
100*float(i)/(X_s.shape[0] - 1)))
if self.verbose > 0:
now, last = time.time(), now
print('Epoch {0} ({1:.01f}s).'.format(epoch + 1, now - last))
if self.verbose > 3:
print('Average reconstruction error: {0:.3f}.'.\
format(self.reconstruction_error(X[0: 10*bs])))
def gradient_descent_step(self, X):
if self.V is None:
            self.V = np.array(X, dtype=bool)
for i, vi in enumerate(self.V):
self.V[i] = self.sample(vi, 1, thinning=self.pcd_steps - 1)[0]
        neg_W = np.zeros(self.W_.shape)
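# Illustrative usage sketch (the class is truncated here: gradient_descent_step
# is incomplete, and fit() also relies on sample() and reconstruction_error(),
# which are assumed to be defined further down in the original file):
# rng = np.random.RandomState(0)
# X = rng.rand(500, 64) > 0.5                  # binary training data, shape (500, 64)
# rbm = RestrictedBoltzmannMachine(n_hidden_variables=32, n_epochs=5, verbose=1)
# rbm.fit(X)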
from __future__ import print_function
import numpy as np
import itertools
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from pytest import warns as assert_warns
from scipy.spatial import SphericalVoronoi, distance
from scipy.spatial import _spherical_voronoi as spherical_voronoi
from scipy.spatial.transform import Rotation
from scipy.optimize import linear_sum_assignment
TOL = 1E-10
class TestSphericalVoronoi(object):
def setup_method(self):
self.points = np.array([
[-0.78928481, -0.16341094, 0.59188373],
[-0.66839141, 0.73309634, 0.12578818],
[0.32535778, -0.92476944, -0.19734181],
[-0.90177102, -0.03785291, -0.43055335],
[0.71781344, 0.68428936, 0.12842096],
[-0.96064876, 0.23492353, -0.14820556],
[0.73181537, -0.22025898, -0.6449281],
[0.79979205, 0.54555747, 0.25039913]]
)
# Issue #9386
self.hemisphere_points = np.array([
[0.88610999, -0.42383021, 0.18755541],
[0.51980039, -0.72622668, 0.4498915],
[0.56540011, -0.81629197, -0.11827989],
[0.69659682, -0.69972598, 0.15854467]])
# Issue #8859
phi = np.linspace(0, 2 * np.pi, 10, endpoint=False) # azimuth angle
theta = np.linspace(0.001, np.pi * 0.4, 5) # polar angle
theta = theta[np.newaxis, :].T
phiv, thetav = np.meshgrid(phi, theta)
phiv = np.reshape(phiv, (50, 1))
thetav = np.reshape(thetav, (50, 1))
x = np.cos(phiv) * np.sin(thetav)
y = np.sin(phiv) * np.sin(thetav)
z = np.cos(thetav)
self.hemisphere_points2 = np.concatenate([x, y, z], axis=1)
def test_constructor(self):
center = np.array([1, 2, 3])
radius = 2
s1 = SphericalVoronoi(self.points)
# user input checks in SphericalVoronoi now require
# the radius / center to match the generators so adjust
# accordingly here
s2 = SphericalVoronoi(self.points * radius, radius)
s3 = SphericalVoronoi(self.points + center, center=center)
s4 = SphericalVoronoi(self.points * radius + center, radius, center)
assert_array_equal(s1.center, np.array([0, 0, 0]))
assert_equal(s1.radius, 1)
assert_array_equal(s2.center, np.array([0, 0, 0]))
assert_equal(s2.radius, 2)
assert_array_equal(s3.center, center)
assert_equal(s3.radius, 1)
assert_array_equal(s4.center, center)
assert_equal(s4.radius, radius)
def test_vertices_regions_translation_invariance(self):
sv_origin = SphericalVoronoi(self.points)
center = np.array([1, 1, 1])
sv_translated = SphericalVoronoi(self.points + center, center=center)
assert_equal(sv_origin.regions, sv_translated.regions)
assert_array_almost_equal(sv_origin.vertices + center,
sv_translated.vertices)
def test_vertices_regions_scaling_invariance(self):
sv_unit = SphericalVoronoi(self.points)
sv_scaled = SphericalVoronoi(self.points * 2, 2)
assert_equal(sv_unit.regions, sv_scaled.regions)
assert_array_almost_equal(sv_unit.vertices * 2,
sv_scaled.vertices)
def test_old_radius_api(self):
sv_unit = SphericalVoronoi(self.points, radius=1)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`radius` is `None`")
sv = SphericalVoronoi(self.points, None)
assert_array_almost_equal(sv_unit.vertices, sv.vertices)
def test_old_radius_api_warning(self):
with assert_warns(DeprecationWarning):
sv = SphericalVoronoi(self.points, None)
def test_sort_vertices_of_regions(self):
sv = SphericalVoronoi(self.points)
unsorted_regions = sv.regions
sv.sort_vertices_of_regions()
assert_equal(sorted(sv.regions), sorted(unsorted_regions))
def test_sort_vertices_of_regions_flattened(self):
expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1],
[4, 8, 7, 5, 6], [9, 11, 10], [2, 7, 5],
[1, 4, 8, 11, 9], [0, 3, 10, 9, 1]])
expected = list(itertools.chain(*sorted(expected)))
sv = SphericalVoronoi(self.points)
sv.sort_vertices_of_regions()
actual = list(itertools.chain(*sorted(sv.regions)))
assert_array_equal(actual, expected)
def test_sort_vertices_of_regions_dimensionality(self):
points = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0.5, 0.5, 0.5, 0.5]])
with pytest.raises(TypeError, match="three-dimensional"):
sv = spherical_voronoi.SphericalVoronoi(points)
sv.sort_vertices_of_regions()
def test_num_vertices(self):
# for any n >= 3, a spherical Voronoi diagram has 2n - 4
# vertices; this is a direct consequence of Euler's formula
        # as explained by Dinis and Mamede (2010), Proceedings of the
# 2010 International Symposium on Voronoi Diagrams in Science
# and Engineering
sv = SphericalVoronoi(self.points)
expected = self.points.shape[0] * 2 - 4
actual = sv.vertices.shape[0]
assert_equal(actual, expected)
def test_voronoi_circles(self):
sv = spherical_voronoi.SphericalVoronoi(self.points)
for vertex in sv.vertices:
distances = distance.cdist(sv.points, np.array([vertex]))
closest = np.array(sorted(distances)[0:3])
assert_almost_equal(closest[0], closest[1], 7, str(vertex))
assert_almost_equal(closest[0], closest[2], 7, str(vertex))
def test_duplicate_point_handling(self):
# an exception should be raised for degenerate generators
# related to Issue# 7046
self.degenerate = np.concatenate((self.points, self.points))
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.degenerate)
def test_incorrect_radius_handling(self):
# an exception should be raised if the radius provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
radius=0.98)
def test_incorrect_center_handling(self):
# an exception should be raised if the center provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
center=[0.1, 0, 0])
def test_single_hemisphere_handling(self):
# Test solution of Issues #9386, #8859
for points in [self.hemisphere_points, self.hemisphere_points2]:
sv = SphericalVoronoi(points)
triangles = sv._tri.points[sv._tri.simplices]
dots = np.einsum('ij,ij->i', sv.vertices, triangles[:, 0])
circumradii = np.arccos(np.clip(dots, -1, 1))
assert np.max(circumradii) > np.pi / 2
def test_rank_deficient(self):
# rank-1 input cannot be triangulated
points = np.array([[-1, 0, 0], [1, 0, 0]])
with pytest.raises(ValueError, match="Rank of input points"):
sv = spherical_voronoi.SphericalVoronoi(points)
@pytest.mark.parametrize("n", [8, 15, 21])
@pytest.mark.parametrize("radius", [0.5, 1, 2])
@pytest.mark.parametrize("center", [(0, 0, 0), (1, 2, 3)])
def test_geodesic_input(self, n, radius, center):
U = Rotation.random(random_state=0).as_matrix()
thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
points = np.vstack([np.sin(thetas), np.cos(thetas), np.zeros(n)]).T
points = radius * points @ U
sv = SphericalVoronoi(points + center, radius=radius, center=center)
# each region must have 4 vertices
region_sizes = np.array([len(region) for region in sv.regions])
assert (region_sizes == 4).all()
regions = np.array(sv.regions)
# vertices are those between each pair of input points + north and
# south poles
vertices = sv.vertices - center
assert len(vertices) == n + 2
# verify that north and south poles are orthogonal to geodesic on which
# input points lie
poles = vertices[n:]
assert np.abs(np.dot(points, poles.T)).max() < 1E-10
for point, region in zip(points, sv.regions):
cosine = np.dot(vertices[region], point)
sine = np.linalg.norm(np.cross(vertices[region], point), axis=1)
arclengths = radius * np.arctan2(sine, cosine)
# test arc lengths to poles
            assert_almost_equal(arclengths[[1, 3]], radius * np.pi / 2)
"""
Copyright 2019 <NAME> <<EMAIL>>
This file is part of localreg.
localreg is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
localreg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with localreg. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO
#
# One could consider making the kernels callable objects. These objects could
# then have a member function without if-testing, which is faster in case it
# is known that all datapoints are to be included. This is the case when
# frac!=None. It could also have a property for its width?
#
import numpy as np
import logging
logger = logging.getLogger("localreg")
logging.basicConfig()
def polyfit(x, y, x0, weights=None, degree=2):
if len(x) == 0:
return np.nan * np.ones_like(x0)
if weights is None:
weights = np.ones_like(x)
s = np.sqrt(weights)
X = x[:, None] ** np.arange(degree + 1)
X0 = x0[:, None] ** np.arange(degree + 1)
lhs = X * s[:, None]
rhs = y * s
# This is what NumPy uses for default from version 1.15 onwards,
# and what 1.14 uses when rcond=None. Computing it here ensures
# support for older versions of NumPy.
rcond = np.finfo(lhs.dtype).eps * max(*lhs.shape)
beta = np.linalg.lstsq(lhs, rhs, rcond=rcond)[0]
rslt = {"beta_fit": beta, "y_fit": X0.dot(beta)}
return rslt
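# Example (illustrative, not part of the library): fit a quadratic through
# noisy samples and evaluate it on a grid.
# x = np.linspace(0, 1, 50)
# y = np.sin(2 * np.pi * x) + 0.1 * np.random.randn(50)
# x0 = np.linspace(0, 1, 200)
# rslt = polyfit(x, y, x0, degree=2)
# rslt["beta_fit"]   # polynomial coefficients, lowest order first
# rslt["y_fit"]      # fitted values at the x0 grid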
def rectangular(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 0.5
return res
def triangular(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 1 - np.abs(t[ind])
return res
def epanechnikov(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 0.75 * (1 - t[ind] ** 2)
return res
def biweight(t):
res = np.zeros_like(t)
    ind = np.where(np.abs(t) <= 1)
    res[ind] = (15 / 16) * (1 - t[ind] ** 2) ** 2  # standard biweight (quartic) kernel
    return res
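# A minimal local-regression step assembled from the pieces above. This is an
# illustrative sketch, not the module's own localreg() API: samples are
# weighted by an Epanechnikov kernel centred on the evaluation point, then a
# local polynomial is fitted with polyfit().
def _local_fit_at(x, y, x0, width=0.2, degree=2):
    w = epanechnikov((x - x0) / width)
    return polyfit(x, y, np.atleast_1d(x0), weights=w, degree=degree)["y_fit"][0]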
import os
import numpy as np
import torch
import warnings
import sys
import logging
from resunet import UNet
from utils import preprocess, postrocessing, reshape_mask
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
warnings.filterwarnings("ignore", category=UserWarning)
# stores urls and number of classes of the models
model_urls = {
("unet", "r231"): ("unet_r231-d5d2fc3d.pth", 3),
("unet", "ltrclobes"): ("unet_ltrclobes-3a07043d.pth", 6),
("unet", "r231covidweb"): ("unet_r231covid-0de78a7e.pth", 3),
}
def apply(image, model, device, volume_postprocessing=True):
tvolslices, xnew_box = preprocess(image, resolution=[256, 256])
tvolslices[tvolslices > 600] = 600
tvolslices = np.divide((tvolslices + 1024), 1624)
timage_res = np.empty((np.append(0, tvolslices[0].shape)), dtype=np.uint8)
with torch.no_grad():
X = torch.Tensor(tvolslices).unsqueeze(0).to(device)
prediction = model(X)
pls = torch.max(prediction, 1)[1].detach().cpu().numpy().astype(np.uint8)
        timage_res = np.vstack((timage_res, pls))
import warnings
from pathlib import Path
import logging
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mtick
import pandas as pd
from matplotlib.ticker import FormatStrFormatter
from typing import Any
from tifffile import imsave
import numpy as np
import seaborn as sns
from tqdm import tqdm, trange
import matplotlib.patches as patches
from preprocessing import embedding
from wavefront import Wavefront
from zernike import Zernike
from synthetic import SyntheticPSF
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
warnings.filterwarnings('ignore')
def plot_training_dist(n_samples=1000, batch_size=200, wavelength=.605):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
from utils import peak2peak
from utils import microns2waves
psfargs = dict(
n_modes=60,
distribution='zipf',
bimodal=True,
gamma=1.5,
lam_detection=wavelength,
amplitude_ranges=(0, .25),
psf_shape=(32, 32, 32),
x_voxel_size=.15,
y_voxel_size=.15,
z_voxel_size=.6,
batch_size=batch_size,
snr=30,
max_jitter=1,
cpu_workers=-1,
)
n_batches = n_samples // batch_size
peaks = []
zernikes = pd.DataFrame([], columns=range(1, psfargs['n_modes'] + 1))
for _, (psfs, ys) in zip(range(n_batches), SyntheticPSF(**psfargs).generator()):
        zernikes = pd.concat([
            zernikes,
            pd.DataFrame(microns2waves(ys, wavelength=wavelength), columns=range(1, psfargs['n_modes'] + 1))
        ], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
peaks.extend(list(peak2peak(ys)))
logger.info(zernikes.round(2))
fig, (pax, cax, zax) = plt.subplots(1, 3, figsize=(16, 4))
sns.histplot(peaks, kde=True, ax=pax, color='dimgrey')
pax.set_xlabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$\n'
rf'($\lambda = {int(wavelength*1000)}~nm$)'
)
pax.set_ylabel(rf'Samples')
# bars = sns.barplot(zernikes.columns.values, zernikes.mean(axis=0), ax=zax)
# for index, label in enumerate(bars.get_xticklabels()):
# if index % int(.1 * zernikes.shape[1]) == 0:
# label.set_visible(True)
# else:
# label.set_visible(False)
# zax.set_xlabel('Average amplitude per zernike mode')
zernikes = np.abs(zernikes)
dmodes = (zernikes[zernikes > .25]).count(axis=1)
hist, bins = np.histogram(dmodes, bins=zernikes.columns.values)
hist = hist / hist.sum()
idx = (hist > 0).nonzero()
bars = sns.barplot(bins[idx], hist[idx], ax=cax)
for index, label in enumerate(bars.get_xticklabels()):
if index % 2 == 0:
label.set_visible(True)
else:
label.set_visible(False)
cax.set_xlabel(
f'Number of modes above diffraction limit\n'
f'($\\alpha > \lambda/4$)'
)
zernikes = zernikes.div(zernikes.sum(axis=1), axis=0)
dmodes = (zernikes[zernikes > .05]).count(axis=1)
hist, bins = np.histogram(dmodes, bins=zernikes.columns.values)
idx = (hist > 0).nonzero()
hist = hist / hist.sum()
bars = sns.barplot(bins[idx], hist[idx], ax=zax, palette='Accent')
for index, label in enumerate(bars.get_xticklabels()):
if index % 2 == 0:
label.set_visible(True)
else:
label.set_visible(False)
zax.set_xlabel(
f'Number of dominant modes\n'
f'$\\alpha / \\sum_{{i=1}}^{{60}}{{\\alpha_{{i}}}} > 5\%$'
)
pax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
cax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
zax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
name = f'{psfargs["distribution"]}_{psfargs["n_modes"]}modes_gamma_{str(psfargs["gamma"]).replace(".", "p")}'
plt.savefig(
f'../data/{name}.png',
dpi=300, bbox_inches='tight', pad_inches=.25
)
def plot_fov(n_modes=60, wavelength=.605, psf_cmap='hot', x_voxel_size=.15, y_voxel_size=.15, z_voxel_size=.6):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
from utils import peak_aberration, center_crop
waves = np.round(np.arange(0, .5, step=.1), 2)
res = [128, 64, 32]
offsets = [0, 32, 48]
savedir = '../data/fov/ratio_150x-150y-600z'
logger.info(waves)
for i in range(3, n_modes):
fig = plt.figure(figsize=(35, 60))
gs = fig.add_gridspec(len(waves)*len(res), 8)
grid = {}
for a, j in zip(waves, np.arange(0, len(waves)*len(res), step=3)):
for k, r in enumerate(res):
for c in range(8):
grid[(a, r, c)] = fig.add_subplot(gs[j+k, c])
# from pprint import pprint
# pprint(grid)
for j, amp in enumerate(tqdm(waves, desc=f'Mode [#{i}]')):
phi = np.zeros(n_modes)
phi[i] = amp
w = Wavefront(phi, order='ansi')
for r in res:
gen = SyntheticPSF(
amplitude_ranges=(-1, 1),
n_modes=n_modes,
lam_detection=wavelength,
psf_shape=3*[r],
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
snr=100,
max_jitter=0,
cpu_workers=-1,
)
window = gen.single_psf(w, zplanes=0, normed=True, noise=False)
#window = center_crop(psf, crop_shape=tuple(3 * [r]))
fft = np.fft.fftn(window)
fft = np.fft.fftshift(fft)
fft = np.abs(fft)
# fft[fft == np.inf] = np.nan
# fft[fft == -np.inf] = np.nan
# fft[fft == np.nan] = np.min(fft)
# fft = np.log10(fft)
fft /= np.max(fft)
perfect_psf = gen.single_psf(phi=Wavefront(np.zeros(n_modes)), zplanes=0)
perfect_fft = np.fft.fftn(perfect_psf)
perfect_fft = np.fft.fftshift(perfect_fft)
perfect_fft = np.abs(perfect_fft)
perfect_fft /= np.max(perfect_fft)
fft = fft / perfect_fft
fft[fft > 1] = 0
NA_det = 1.0
n = 1.33
lambda_det = wavelength * 1000
kx = ky = 4 * np.pi * NA_det / lambda_det
kz = 2 * np.pi * ((n - np.sqrt(n**2 - NA_det**2)) / lambda_det)
N = np.array(window.shape)
px = x_voxel_size * 1000
py = y_voxel_size * 1000
pz = z_voxel_size * 1000
# get the axis lengths of the support
hN = np.ceil((N - 1) / 2)
a = 2 * hN[2] * (kx * px) / (2 * np.pi)
b = 2 * hN[1] * (ky * py) / (2 * np.pi)
c = 2 * hN[0] * (kz * pz) / (2 * np.pi)
# formulate the ellipse
Z, Y, X = np.mgrid[-hN[0]:hN[0], -hN[1]:hN[1], -hN[2]:hN[2]]
mask = np.sqrt(X**2/a**2 + Y**2/b**2 + Z**2/c**2)
mask = mask <= 1
for ax in range(3):
vol = np.max(window, axis=ax) ** .5
grid[(amp, r, ax)].imshow(vol, cmap=psf_cmap, vmin=0, vmax=1)
grid[(amp, r, ax)].set_aspect('equal')
if ax == 0:
vol = fft[fft.shape[0]//2, :, :]
vol *= mask[mask.shape[0] // 2, :, :]
elif ax == 1:
vol = fft[:, fft.shape[1]//2, :]
vol *= mask[:, mask.shape[1] // 2, :]
else:
vol = fft[:, :, fft.shape[2]//2]
vol *= mask[:, :, mask.shape[2] // 2]
# vol = np.max(fft, axis=ax) ** .5
# vol = np.nan_to_num(vol)
grid[(amp, r, ax+3)].imshow(vol, vmin=0, vmax=1)
grid[(amp, r, ax+3)].set_aspect('equal')
# draw boxes
for z, rr in enumerate(res):
rect = patches.Rectangle(
(offsets[z], offsets[z]),
rr, rr,
linewidth=1,
edgecolor='w',
facecolor='none'
)
grid[(amp, 128, ax)].add_patch(rect)
grid[(amp, r, ax)].axis('off')
grid[(amp, r, ax+3)].axis('off')
grid[(amp, r, 6)].semilogy(fft[:, fft.shape[0]//2, fft.shape[0]//2], '-', label='XY')
grid[(amp, r, 6)].semilogy(fft[fft.shape[0]//2, :, fft.shape[0]//2], '--', label='XZ')
grid[(amp, r, 6)].semilogy(fft[fft.shape[0]//2, fft.shape[0]//2, :], ':', label='YZ')
grid[(amp, r, 6)].grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
grid[(amp, r, 6)].legend(frameon=False, ncol=1, bbox_to_anchor=(1.0, 1.0), loc='upper left')
grid[(amp, r, 6)].set_aspect('equal')
mat = grid[(amp, r, 7)].contourf(
w.wave(100),
levels=np.arange(-10, 10, step=1),
cmap='Spectral_r',
extend='both'
)
grid[(amp, r, 7)].axis('off')
grid[(amp, r, 7)].set_aspect('equal')
grid[(amp, r, 7)].set_title(f'{round(peak_aberration(phi))} waves')
grid[(amp, r, 0)].set_title('XY')
grid[(amp, r, 3)].set_title('XY')
grid[(amp, r, 1)].set_title('XZ')
grid[(amp, r, 4)].set_title('XZ')
grid[(amp, r, 2)].set_title('YZ')
grid[(amp, r, 5)].set_title('YZ')
plt.subplots_adjust(top=0.95, right=0.95, wspace=.2)
plt.savefig(f'{savedir}/fov_mode_{i}.pdf', bbox_inches='tight', pad_inches=.25)
def plot_relratio(
res=64,
padsize=None,
n_modes=15,
wavelength=.605,
x_voxel_size=.15,
y_voxel_size=.15,
z_voxel_size=.6,
savepath='../data/relratio',
):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
from utils import peak_aberration
vmin, vmax, vcenter, step = 0, 2, 1, .1
highcmap = plt.get_cmap('YlOrRd', 256)
lowcmap = plt.get_cmap('YlGnBu_r', 256)
low = np.linspace(0, 1 - step, int(abs(vcenter - vmin) / step))
high = np.linspace(0, 1 + step, int(abs(vcenter - vmax) / step))
cmap = np.vstack((lowcmap(low), [1, 1, 1, 1], highcmap(high)))
cmap = mcolors.ListedColormap(cmap)
waves = np.round(np.arange(-.25, .3, step=.05), 3)
logger.info(waves)
fig = plt.figure(figsize=(25, 60))
nrows = (n_modes-5) * 6
gs = fig.add_gridspec(nrows, len(waves)+1)
grid = {}
for mode, ax in zip(range(5, n_modes), np.round(np.arange(0, nrows, step=6))):
for k in range(6):
grid[(mode, k, 'wavefront')] = fig.add_subplot(gs[ax + k, 0])
for j, w in enumerate(waves):
grid[(mode, k, w)] = fig.add_subplot(gs[ax+k, j+1])
gen = SyntheticPSF(
amplitude_ranges=(-1, 1),
n_modes=n_modes,
lam_detection=wavelength,
psf_shape=3*[res],
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
snr=20,
max_jitter=0,
cpu_workers=-1,
)
for mode in trange(5, n_modes):
for amp in waves:
phi = np.zeros(n_modes)
phi[mode] = amp
window, amps, snr, zplanes, maxcounts = gen.single_otf(
phi=phi,
zplanes=0,
normed=True,
noise=True,
augmentation=True,
meta=True,
midslice=True,
na_mask=True,
ratio=True,
padsize=padsize
)
abr = round(peak_aberration(phi) * np.sign(amp), 1)
grid[(mode, 0, amp)].set_title(f'{abr}$\\lambda$')
outdir = Path(f'{savepath}/i{res}_pad_{padsize}/mode_{mode}/ratios/')
outdir.mkdir(exist_ok=True, parents=True)
imsave(f"{outdir}/{str(abr).replace('.', 'p')}.tif", window)
for ax in range(6):
if amp == waves[-1]:
mat = grid[(mode, ax, 'wavefront')].contourf(
Wavefront(phi).wave(100),
levels=np.arange(-10, 10, step=1),
cmap='Spectral_r',
extend='both'
)
grid[(mode, ax, 'wavefront')].axis('off')
grid[(mode, ax, 'wavefront')].set_aspect('equal')
if window.shape[0] == 6:
vol = window[ax, :, :]
else:
vol = np.max(window, axis=ax)
m = grid[(mode, ax, amp)].imshow(
vol,
cmap=cmap if ax < 3 else 'Spectral_r',
vmin=vmin if ax < 3 else -1,
vmax=vmax if ax < 3 else 1,
)
grid[(mode, ax, amp)].set_aspect('equal')
grid[(mode, ax, amp)].axis('off')
cax = inset_axes(
grid[(mode, ax, waves[-1])],
width="10%",
height="100%",
loc='center right',
borderpad=-3
)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_label_position("right")
plt.subplots_adjust(top=0.95, right=0.95, wspace=.2)
plt.savefig(f'{savepath}/i{res}_pad{padsize}.pdf', bbox_inches='tight', pad_inches=.25)
def plot_simulation(
res=64,
padsize=None,
n_modes=60,
wavelength=.605,
x_voxel_size=.15,
y_voxel_size=.15,
z_voxel_size=.6,
#savepath='../data/embeddings/seminar/x100-y100-z100',
savepath='../data/embeddings/seminar/x150-y150-z600',
):
from utils import peak_aberration
waves = np.round([-.2, -.1, -.05, .05, .1, .2], 3)
logger.info(waves)
gen = SyntheticPSF(
amplitude_ranges=(-1, 1),
n_modes=n_modes,
lam_detection=wavelength,
psf_shape=3*[res],
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
snr=100,
max_jitter=0,
cpu_workers=-1,
)
outdir = Path(f'{savepath}/i{res}_pad_{padsize}/')
outdir.mkdir(exist_ok=True, parents=True)
ipsf = gen.theoretical_psf(normed=True)
imsave(f"{outdir}/theoretical_psf.tif", ipsf)
iotf, _ = gen.fft(ipsf, padsize=padsize)
imsave(f"{outdir}/theoretical_otf.tif", iotf)
for mode in trange(5, n_modes):
for amp in waves:
phi = np.zeros(n_modes)
phi[mode] = amp
abr = round(peak_aberration(phi) * np.sign(amp), 1)
# otf = gen.single_otf(
# phi=phi,
# zplanes=0,
# normed=True,
# noise=True,
# augmentation=True,
# midslice=False,
# na_mask=True,
# ratio=False,
# padsize=padsize,
# )
#
# amps = Path(f'{outdir}/mode_{mode}/amps')
# amps.mkdir(exist_ok=True, parents=True)
# imsave(f"{amps}/{str(abr).replace('.', 'p')}.tif", otf)
embedding = gen.single_otf(
phi=phi,
zplanes=0,
normed=True,
noise=True,
augmentation=True,
midslice=True,
na_mask=True,
ratio=True,
padsize=padsize,
)
emb = Path(f'{outdir}/mode_{mode}/embeddings')
emb.mkdir(exist_ok=True, parents=True)
imsave(f"{emb}/{str(abr).replace('.', 'p')}.tif", embedding)
psf = gen.single_psf(
phi=phi,
zplanes=0,
normed=True,
noise=True,
augmentation=True,
meta=False,
)
reals = Path(f'{outdir}/mode_{mode}/psfs')
reals.mkdir(exist_ok=True, parents=True)
imsave(f"{reals}/{str(abr).replace('.', 'p')}.tif", psf)
def plot_signal(n_modes=60, wavelength=.605):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
from preprocessing import center_crop
from utils import peak_aberration
waves = np.arange(0, .5, step=.05)
res = [32, 64, 96, 128, 192, 256]
logger.info(waves)
gen = SyntheticPSF(
amplitude_ranges=(-1, 1),
n_modes=n_modes,
lam_detection=wavelength,
psf_shape=(256, 256, 256),
x_voxel_size=.1,
y_voxel_size=.1,
z_voxel_size=.1,
snr=100,
max_jitter=0,
cpu_workers=-1,
)
signal = {}
for i in range(3, n_modes):
signal[i] = {}
for j, a in enumerate(tqdm(waves, desc=f'Mode [#{i}]')):
phi = np.zeros(n_modes)
phi[i] = a
w = Wavefront(phi, order='ansi')
abr = 0 if j == 0 else round(peak_aberration(phi))
signal[i][abr] = {}
psf = gen.single_psf(w, zplanes=0, normed=True, noise=False)
# psf_cmap = 'hot'
# fig, axes = plt.subplots(len(res), 4)
for k, r in enumerate(res):
window = center_crop(psf, crop_shape=tuple(3*[r]))
signal[i][abr][r] = np.sum(window)
# vol = window ** .5
# vol = np.nan_to_num(vol)
#
# axes[k, 0].bar(range(n_modes), height=w.amplitudes_ansi_waves)
# m = axes[k, 1].imshow(np.max(vol, axis=0), cmap=psf_cmap, vmin=0, vmax=1)
# axes[k, 2].imshow(np.max(vol, axis=1), cmap=psf_cmap, vmin=0, vmax=1)
# axes[k, 3].imshow(np.max(vol, axis=2), cmap=psf_cmap, vmin=0, vmax=1)
# plt.tight_layout()
# plt.show()
df = pd.DataFrame.from_dict(signal[i], orient="index")
logger.info(df)
total_energy = df[res[-1]].values
df = df.apply(lambda e: e/total_energy, axis=0)
logger.info(df)
theoretical = df.iloc[[0]].values[0]
rdf = df.apply(lambda row: abs(theoretical-row) / theoretical, axis=1)
logger.info(rdf)
fig = plt.figure(figsize=(8, 6))
gs = fig.add_gridspec(2, 3)
ax = fig.add_subplot(gs[0, :2])
axw = fig.add_subplot(gs[0, 2])
axr = fig.add_subplot(gs[1, :])
for r in res:
ax.plot(df[r], label=r)
ax.set_xlim((0, None))
ax.set_ylim((0, 1))
ax.set_ylabel('Signal')
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
axr.plot(rdf[r], label=r)
axr.set_xlim((0, None))
axr.set_ylim((0, 1))
axr.set_xlabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$'
rf'($\lambda = {int(wavelength*1000)}~nm$)'
)
axr.set_ylabel('Percentage signal lost')
axr.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
axr.legend(frameon=False, loc='upper center', ncol=6)
phi = np.zeros(n_modes)
phi[i] = .5
phi = Wavefront(phi, order='ansi').wave(size=100)
mat = axw.contourf(
phi,
cmap='Spectral_r',
extend='both'
)
divider = make_axes_locatable(axw)
top = divider.append_axes("top", size='30%', pad=0.2)
top.hist(phi.flatten(), bins=phi.shape[0], color='grey')
top.set_yticks([])
top.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
top.spines['right'].set_visible(False)
top.spines['top'].set_visible(False)
top.spines['left'].set_visible(False)
axw.axis('off')
plt.tight_layout()
plt.savefig(f'../data/signal_res_mode_{i}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
signal = pd.DataFrame.from_dict(signal, orient="index").stack().to_frame()
signal.index.names = ['index', 'waves']
signal = pd.concat([signal.drop([0], axis=1), signal[0].apply(pd.Series)], axis=1).reset_index()
logger.info(signal)
signal.to_csv('../data/signal.csv')
def plot_mode(savepath, df, mode_index, n_modes=60, wavelength=.605):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
fig = plt.figure(figsize=(8, 4))
gs = fig.add_gridspec(1, 3)
ax = fig.add_subplot(gs[0, :2])
axw = fig.add_subplot(gs[0, 2])
ax.plot(df)
ax.set_xlim((0, 12))
ax.set_yscale('log')
ax.set_ylim((10**-2, 10))
ax.set_xlabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$'
rf'($\lambda = {int(wavelength * 1000)}~nm$)'
)
ax.set_ylabel('Peak-to-peak residuals')
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
phi = np.zeros(n_modes)
phi[mode_index] = .5
phi = Wavefront(phi, order='ansi').wave(size=100)
mat = axw.contourf(
phi,
cmap='Spectral_r',
extend='both'
)
divider = make_axes_locatable(axw)
top = divider.append_axes("top", size='30%', pad=0.2)
top.hist(phi.flatten(), bins=phi.shape[0], color='grey')
top.set_yticks([])
top.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
top.spines['right'].set_visible(False)
top.spines['top'].set_visible(False)
top.spines['left'].set_visible(False)
axw.axis('off')
plt.tight_layout()
plt.savefig(savepath, dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_aberrations():
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
fig, axes = plt.subplots(4, 4, figsize=(10, 8))
axes = axes.flatten()
for i in range(15):
ax = axes[i]
idx = i
w = Wavefront({idx: 1})
ax.set_title(f"{Zernike(idx).ansi_to_nm(idx)}")
mat = ax.imshow(w.wave(size=100), cmap='Spectral_r')
ax.axis('off')
plt.tight_layout()
plt.colorbar(mat)
plt.show()
def plot_psnr(psf_cmap='hot', gamma=.75):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
def psf_slice(xy, zx, zy, vol):
vol = vol ** gamma
vol = np.nan_to_num(vol)
mid_plane = vol.shape[0] // 2
# m = xy.imshow(vol[mid_plane, :, :], cmap=psf_cmap, vmin=0, vmax=1)
# zx.imshow(vol[:, mid_plane, :], cmap=psf_cmap, vmin=0, vmax=1)
# zy.imshow(vol[:, :, mid_plane], cmap=psf_cmap, vmin=0, vmax=1)
levels = np.arange(0, 1.01, .01)
m = xy.contourf(vol[mid_plane, :, :], cmap=psf_cmap, levels=levels, vmin=0, vmax=1)
zx.contourf(vol[:, mid_plane, :], cmap=psf_cmap, levels=levels, vmin=0, vmax=1)
zy.contourf(vol[:, :, mid_plane], cmap=psf_cmap, levels=levels, vmin=0, vmax=1)
cax = inset_axes(zy, width="10%", height="100%", loc='center right', borderpad=-2)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
return m
scales = sorted(set([int(t) for t in np.logspace(0, 2, num=8)]))
logger.info(f"PSNRs: {scales}")
fig, axes = plt.subplots(len(scales), 3, figsize=(8, 16))
for i, snr in tqdm(enumerate(scales), total=len(scales)):
psfargs = dict(
lam_detection=.605,
amplitude_ranges=0,
psf_shape=(64, 64, 64),
x_voxel_size=.1,
y_voxel_size=.1,
z_voxel_size=.1,
batch_size=10,
snr=snr,
max_jitter=0,
cpu_workers=-1,
)
psfs, ys, psnrs, zplanes, maxcounts = next(SyntheticPSF(**psfargs).generator(debug=True))
target_psnr = np.ceil(np.nanquantile(psnrs, .95))
psf_slice(
xy=axes[i, 0],
zx=axes[i, 1],
zy=axes[i, 2],
vol=psfs[np.random.randint(psfs.shape[0]), :, :, :, 0],
)
axes[i, 0].set_title(f'r-SNR: {snr}')
axes[i, 1].set_title(f"PSNR: {target_psnr:.2f}")
axes[i, 2].set_title(f"$\gamma$: {gamma:.2f}")
axes[-1, 0].set_xlabel('XY')
axes[-1, 1].set_xlabel('ZX')
axes[-1, 2].set_xlabel('ZY')
plt.tight_layout()
plt.subplots_adjust(top=0.95, right=0.95, wspace=.3, hspace=.3)
plt.savefig(f'../data/noise.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_dmodes(
psf: np.array,
gen: SyntheticPSF,
y: Wavefront,
pred: Wavefront,
save_path: Path,
wavelength: float = .605,
psf_cmap: str = 'hot',
gamma: float = .5,
threshold: float = .01,
):
def wavefront(iax, phi, levels, label=''):
mat = iax.contourf(
phi,
levels=levels,
cmap=wave_cmap,
vmin=np.min(levels),
vmax=np.max(levels),
extend='both'
)
iax.axis('off')
iax.set_title(label)
cax = inset_axes(iax, width="10%", height="100%", loc='center right', borderpad=-3)
cbar = fig.colorbar(
mat,
cax=cax,
fraction=0.046,
pad=0.04,
extend='both',
format=FormatStrFormatter("%.2g"),
)
cbar.ax.set_title(r'$\lambda$')
cbar.ax.yaxis.set_ticks_position('right')
cbar.ax.yaxis.set_label_position('left')
return mat
def psf_slice(xy, zx, zy, vol, label=''):
vol = vol ** gamma
vol = np.nan_to_num(vol)
if vol.shape[0] == 3:
m = xy.imshow(vol[0], cmap='Spectral_r', vmin=0, vmax=1)
zx.imshow(vol[1], cmap='Spectral_r', vmin=0, vmax=1)
zy.imshow(vol[2], cmap='Spectral_r', vmin=0, vmax=1)
else:
m = xy.imshow(np.max(vol, axis=0), cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(np.max(vol, axis=1), cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(np.max(vol, axis=2), cmap=psf_cmap, vmin=0, vmax=1)
cax = inset_axes(zy, width="10%", height="100%", loc='center right', borderpad=-3)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
cax.yaxis.set_label_position("right")
xy.set_ylabel(label)
return m
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'axes.autolimit_mode': 'round_numbers'
})
# plt.style.use("dark_background")
if len(psf.shape) > 3:
psf = np.squeeze(psf, axis=-1)
psf = np.squeeze(psf, axis=0)
y_wave = y.wave(size=100)
step = .25
vmax = round(np.max([
np.abs(round(np.nanquantile(y_wave, .1), 2)),
np.abs(round(np.nanquantile(y_wave, .9), 2))
]) * 4) / 4
vmax = .25 if vmax < threshold else vmax
highcmap = plt.get_cmap('magma_r', 256)
middlemap = plt.get_cmap('gist_gray', 256)
lowcmap = plt.get_cmap('gist_earth_r', 256)
ll = np.arange(-vmax, -.25 + step, step)
mm = [-.15, 0, .15]
hh = np.arange(.25, vmax + step, step)
mticks = np.concatenate((ll, mm, hh))
levels = np.vstack((
lowcmap(.66 * ll / ll.min()),
middlemap([.85, .95, 1, .95, .85]),
highcmap(.66 * hh / hh.max())
))
wave_cmap = mcolors.ListedColormap(levels)
fig = plt.figure(figsize=(15, 200))
gs = fig.add_gridspec(64, 4)
p_psf = gen.single_psf(pred, zplanes=0)
ax_xy = fig.add_subplot(gs[0, 0])
ax_xz = fig.add_subplot(gs[0, 1])
ax_yz = fig.add_subplot(gs[0, 2])
ax_w = fig.add_subplot(gs[0, 3])
psf_slice(ax_xy, ax_xz, ax_yz, p_psf, label='Prediction')
wavefront(ax_w, pred.wave(size=100), label='Prediction', levels=mticks)
ax_xy = fig.add_subplot(gs[1, 0])
ax_xz = fig.add_subplot(gs[1, 1])
ax_yz = fig.add_subplot(gs[1, 2])
ax_w = fig.add_subplot(gs[1, 3])
psf_slice(ax_xy, ax_xz, ax_yz, psf, label='PSF (maxproj)')
wavefront(ax_w, y_wave, label='Ground truth', levels=mticks)
otf = np.squeeze(embedding(np.expand_dims(psf, -1), ishape=psf.shape[-1]), axis=-1)
ax_xy = fig.add_subplot(gs[2, 0])
ax_xz = fig.add_subplot(gs[2, 1])
ax_yz = fig.add_subplot(gs[2, 2])
ax_w = fig.add_subplot(gs[2, 3])
psf_slice(ax_xy, ax_xz, ax_yz, otf, label='R_rel')
wavefront(ax_w, y_wave, label='Ground truth', levels=mticks)
k = 0
for i, w in enumerate(y.amplitudes_ansi_waves):
k += 1
phi = np.zeros(60)
phi[i] = w / (2 * np.pi / wavelength)
phi = Wavefront(phi, order='ansi')
psf = gen.single_psf(phi, zplanes=0)
otf = np.squeeze(embedding(np.expand_dims(psf, -1), ishape=psf.shape[-1]), axis=-1)
ax_xy = fig.add_subplot(gs[2+k, 0])
ax_xz = fig.add_subplot(gs[2+k, 1])
ax_yz = fig.add_subplot(gs[2+k, 2])
ax_w = fig.add_subplot(gs[2+k, 3])
psf_slice(ax_xy, ax_xz, ax_yz, otf, label=f'Mode #{i}')
wavefront(ax_w, phi.wave(100), label=f'Mode #{i}', levels=mticks)
ax_zcoff = fig.add_subplot(gs[-1, :])
ax_zcoff.plot(pred.amplitudes_ansi_waves, '-o', color='C0', label='Predictions')
ax_zcoff.plot(y.amplitudes_ansi_waves, '-o', color='C1', label='Ground truth')
ax_zcoff.legend(frameon=False, loc='upper center', bbox_to_anchor=(.1, 1))
ax_zcoff.set_xticks(range(len(pred.amplitudes_ansi_waves)))
ax_zcoff.set_ylabel(f'Amplitudes\n($\lambda = {wavelength}~\mu m$)')
ax_zcoff.spines['top'].set_visible(False)
ax_zcoff.set_xlim((0, len(pred.amplitudes_ansi_waves)))
ax_zcoff.grid(True, which="both", axis='both', lw=1, ls='--', zorder=0)
plt.subplots_adjust(top=0.95, right=0.95, wspace=.2)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
def diagnostic_assessment(
psf: np.array,
gt_psf: np.array,
predicted_psf: np.array,
corrected_psf: np.array,
psnr: Any,
maxcounts: Any,
y: Wavefront,
pred: Wavefront,
save_path: Path,
wavelength: float = .605,
display: bool = False,
psf_cmap: str = 'hot',
gamma: float = .5,
threshold: float = .01,
):
def wavefront(iax, phi, levels, label=''):
mat = iax.contourf(
phi,
levels=levels,
cmap=wave_cmap,
vmin=np.min(levels),
vmax=np.max(levels),
extend='both'
)
divider = make_axes_locatable(iax)
top = divider.append_axes("top", size='30%', pad=0.2)
top.hist(phi.flatten(), bins=phi.shape[0], color='grey')
top.set_title(label)
top.set_yticks([])
top.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
top.spines['right'].set_visible(False)
top.spines['top'].set_visible(False)
top.spines['left'].set_visible(False)
return mat
def psf_slice(xy, zx, zy, vol, label=''):
if vol.shape[0] == 6:
vmin, vmax, vcenter, step = 0, 2, 1, .1
highcmap = plt.get_cmap('YlOrRd', 256)
lowcmap = plt.get_cmap('YlGnBu_r', 256)
low = np.linspace(0, 1 - step, int(abs(vcenter - vmin) / step))
high = np.linspace(0, 1 + step, int(abs(vcenter - vmax) / step))
cmap = np.vstack((lowcmap(low), [1, 1, 1, 1], highcmap(high)))
cmap = mcolors.ListedColormap(cmap)
inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=xy, wspace=0.1, hspace=0.1)
ax = fig.add_subplot(inner[0])
ax.imshow(vol[0], cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylabel('Input')
ax.set_xlabel(r'$\alpha = |\tau| / |\hat{\tau}|$')
ax = fig.add_subplot(inner[1])
ax.imshow(vol[3], cmap='coolwarm', vmin=-1, vmax=1)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$\phi = G_{\sigma=3}(\angle \tau)$')
xy.axis('off')
inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=zx, wspace=0.1, hspace=0.1)
ax = fig.add_subplot(inner[0])
ax.imshow(vol[1], cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$\alpha = |\tau| / |\hat{\tau}|$')
ax = fig.add_subplot(inner[1])
ax.imshow(vol[4], cmap='coolwarm', vmin=-1, vmax=1)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$\phi = G_{\sigma=3}(\angle \tau)$')
zx.axis('off')
inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=zy, wspace=0.1, hspace=0.1)
ax = fig.add_subplot(inner[0])
m = ax.imshow(vol[2], cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$\alpha = |\tau| / |\hat{\tau}|$')
ax = fig.add_subplot(inner[1])
ax.imshow(vol[5], cmap='coolwarm', vmin=-1, vmax=1)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$\phi = G_{\sigma=3}(\angle \tau)$')
zy.axis('off')
cax = inset_axes(zy, width="10%", height="100%", loc='center right', borderpad=-3)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
cax.yaxis.set_label_position("right")
elif vol.shape[0] == 3:
m = xy.imshow(vol[0], cmap='Spectral_r', vmin=0, vmax=1)
zx.imshow(vol[1], cmap='Spectral_r', vmin=0, vmax=1)
zy.imshow(vol[2], cmap='Spectral_r', vmin=0, vmax=1)
else:
vol = vol ** gamma
vol = np.nan_to_num(vol)
m = xy.imshow(np.max(vol, axis=0), cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(np.max(vol, axis=1), cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(np.max(vol, axis=2), cmap=psf_cmap, vmin=0, vmax=1)
cax = inset_axes(zy, width="10%", height="100%", loc='center right', borderpad=-3)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
cax.set_ylabel(f"$\gamma$: {gamma:.2f}")
cax.yaxis.set_label_position("right")
xy.set_ylabel(label)
return m
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'axes.autolimit_mode': 'round_numbers'
})
# plt.style.use("dark_background")
if len(psf.shape) > 3:
psf = np.squeeze(psf, axis=-1)
psf = np.squeeze(psf, axis=0)
if not np.isscalar(psnr):
psnr = psnr[0]
if not np.isscalar(maxcounts):
maxcounts = maxcounts[0]
y_wave = y.wave(size=100)
pred_wave = pred.wave(size=100)
diff = y_wave - pred_wave
fig = plt.figure(figsize=(13, 20))
gs = fig.add_gridspec(6 if gt_psf is None else 7, 3)
ax_gt = fig.add_subplot(gs[:2, 0])
ax_pred = fig.add_subplot(gs[:2, 1])
ax_diff = fig.add_subplot(gs[:2, 2])
cax = fig.add_axes([.99, 0.725, 0.02, .175])
ax_xy = fig.add_subplot(gs[2, 0])
ax_xz = fig.add_subplot(gs[2, 1])
ax_yz = fig.add_subplot(gs[2, 2])
ax_pxy = fig.add_subplot(gs[3, 0])
ax_pxz = fig.add_subplot(gs[3, 1])
ax_pyz = fig.add_subplot(gs[3, 2])
ax_cxy = fig.add_subplot(gs[-2, 0])
ax_cxz = fig.add_subplot(gs[-2, 1])
ax_cyz = fig.add_subplot(gs[-2, 2])
ax_zcoff = fig.add_subplot(gs[-1, :])
step = .25
vmax = round(np.max([
np.abs(round(np.nanquantile(y_wave, .1), 2)),
np.abs(round(np.nanquantile(y_wave, .9), 2))
]) * 4) / 4
vmax = .25 if vmax < threshold else vmax
highcmap = plt.get_cmap('magma_r', 256)
middlemap = plt.get_cmap('gist_gray', 256)
lowcmap = plt.get_cmap('gist_earth_r', 256)
ll = np.arange(-vmax, -.25 + step, step)
mm = [-.15, 0, .15]
hh = np.arange(.25, vmax + step, step)
mticks = np.concatenate((ll, mm, hh))
levels = np.vstack((
lowcmap(.66 * ll / ll.min()),
middlemap([.85, .95, 1, .95, .85]),
highcmap(.66 * hh / hh.max())
))
wave_cmap = mcolors.ListedColormap(levels)
mat = wavefront(ax_gt, y_wave, label='Ground truth', levels=mticks)
wavefront(ax_pred, pred_wave, label='Predicted', levels=mticks)
wavefront(ax_diff, diff, label='Residuals', levels=mticks)
cbar = fig.colorbar(
mat,
cax=cax,
fraction=0.046,
pad=0.04,
extend='both',
format=FormatStrFormatter("%.2g"),
# spacing='proportional',
)
cbar.ax.set_title(r'$\lambda$')
cbar.ax.set_ylabel(f'$\lambda = {wavelength}~\mu m$')
cbar.ax.yaxis.set_ticks_position('right')
cbar.ax.yaxis.set_label_position('left')
ax_xy.set_title('XY')
ax_xz.set_title('ZX')
ax_yz.set_title('ZY')
ax_xz.set_ylabel(f"PSNR: {psnr:.2f}")
ax_yz.set_ylabel(f"Max photon count: {maxcounts:.0f}")
psf_slice(ax_xy, ax_xz, ax_yz, psf, label='Input (maxproj)')
psf_slice(ax_pxy, ax_pxz, ax_pyz, predicted_psf, label='Predicted')
psf_slice(ax_cxy, ax_cxz, ax_cyz, corrected_psf, label='Corrected')
if gt_psf is not None:
ax_xygt = fig.add_subplot(gs[-3, 0])
ax_xzgt = fig.add_subplot(gs[-3, 1])
ax_yzgt = fig.add_subplot(gs[-3, 2])
psf_slice(ax_xygt, ax_xzgt, ax_yzgt, gt_psf, label='Validation')
# ax_zcoff.set_title('Zernike modes')
ax_zcoff.plot(pred.amplitudes_ansi_waves, '-o', color='C0', label='Predictions')
ax_zcoff.plot(y.amplitudes_ansi_waves, '-o', color='C1', label='Ground truth')
ax_zcoff.legend(frameon=False, loc='upper center', bbox_to_anchor=(.1, 1))
ax_zcoff.set_xticks(range(len(pred.amplitudes_ansi_waves)))
ax_zcoff.set_ylabel(f'Amplitudes\n($\lambda = {wavelength}~\mu m$)')
ax_zcoff.spines['top'].set_visible(False)
error = 100 * np.abs(y.amplitudes_ansi_waves - pred.amplitudes_ansi_waves) / np.abs(y.amplitudes_ansi_waves)
ax_error = ax_zcoff.twinx()
ax_error.set_ylabel(f'MAPE = {np.mean(error[np.isfinite(error)]):.2f}%', color='darkgrey')
ax_error.tick_params(axis='y', labelcolor='darkgrey')
ax_error.set_ylim(0, 100)
ax_error.bar(range(len(pred.zernikes)), error, color='darkgrey', alpha=.5)
ax_error.yaxis.set_major_formatter(mtick.PercentFormatter())
ax_error.grid(True, which="both", axis='both', lw=1, ls='--', zorder=0)
ax_error.spines['top'].set_visible(False)
if len(pred.zernikes) < 20:
xticks = [f"$\\alpha$={z.index_ansi}\n$j$={z.index_noll}\n$n$={z.n}\n$m$={z.m}" for z in pred.zernikes]
else:
xticks = [z.index_ansi for z in pred.zernikes]
ax_error.set_xticklabels(xticks)
for ax in [ax_gt, ax_pred, ax_diff]:
ax.axis('off')
plt.subplots_adjust(top=0.95, right=0.95, wspace=.2)
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
if display:
plt.tight_layout()
plt.show()
def plot_residuals(df: pd.DataFrame, save_path, wavelength=.605, nsamples=100, label=''):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
fig, ax = plt.subplots(figsize=(8, 6))
df = df.drop(df.index[[0, 1, 2, 4]])
mean = np.mean(df[np.isfinite(df)], axis=0)
stdv = np.std(df[np.isfinite(df)], axis=0)
ax.errorbar(
x=df.columns.values, y=mean, yerr=stdv,
ecolor='lightgrey', lw=2,
label=r'Mean $\pm$ stdev'
)
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_yscale('log')
ax.set_ylim((0.01, .5))
ax.spines['top'].set_visible(False)
ax.set_xlabel(label)
    ax.set_xscale('log', subs=[2, 4, 6, 8])  # 'subsx' was removed in newer Matplotlib
ax.xaxis.set_major_formatter(FormatStrFormatter("%d"))
ax.xaxis.set_minor_formatter(FormatStrFormatter("%d"))
ax.set_xlim(10 ** 1, 10 ** 3)
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_yticks([.01, 0.02, .03, .05, .07, .15, .2, .3, .4], minor=True)
ax.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.tick_params(axis='x', which='major', pad=10)
divider = make_axes_locatable(ax)
axl = divider.append_axes("top", size=2.0, pad=0, sharex=ax)
axl.errorbar(
x=df.columns.values, y=mean, yerr=stdv,
ecolor='lightgrey', lw=2,
)
axl.set_xscale('linear')
axl.set_ylim((0.5, 3))
axl.spines['bottom'].set_visible(False)
axl.xaxis.set_ticks_position('top')
axl.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
axl.grid(True, which="major", axis='both', lw=.5, ls='--', zorder=0)
    axl.set_xscale('log', subs=[2, 3, 4, 6, 8])
axl.xaxis.set_major_formatter(FormatStrFormatter("%d"))
axl.xaxis.set_minor_formatter(FormatStrFormatter("%d"))
axl.tick_params(axis='x', which='major', pad=10)
axl.set_xlim(10 ** 1, 10 ** 3)
axl.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_ylabel(rf'Peak-to-peak residuals')
axl.set_ylabel(rf'($n$ = {nsamples}; $\lambda = {wavelength}~\mu m$)')
ax.legend(frameon=False, loc='lower center')
plt.tight_layout()
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_mae_amps(df: pd.DataFrame, save_path, wavelength=.605):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(df.index, df['mae'], color='k')
ax.grid(True, which="major", axis='both', lw=1, ls='--', zorder=0)
ax.legend(frameon=False, loc='upper right')
ax.set_ylabel(f'MAE ($\lambda = {wavelength}~\mu m$)')
ax.set_xlabel(f'Amplitudes\n($\lambda = {wavelength}~\mu m$)')
plt.tight_layout()
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_eval(means: pd.DataFrame, save_path, wavelength=.605, nsamples=100, label=''):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'xtick.major.pad': 10
})
# fig = plt.figure(figsize=(8, 6))
# ax = fig.gca(projection="3d")
fig, ax = plt.subplots(figsize=(8, 6))
levels = [
.15, .175, .2, .225,
.25, .3, .35, .4, .45,
.5, .6, .7, .8, .9,
1, 1.25, 1.5, 1.75, 2.
]
vmin, vmax, vcenter, step = 0, 2, .1, .01
highcmap = plt.get_cmap('magma_r', 256)
lowcmap = plt.get_cmap('GnBu_r', 256)
low = np.linspace(0, 1 - step, int(abs(vcenter - vmin) / step))
high = np.linspace(0, 1 + step, int(abs(vcenter - vmax) / step))
cmap = np.vstack((lowcmap(low), [1, 1, 1, 1], highcmap(high)))
cmap = mcolors.ListedColormap(cmap)
contours = ax.contourf(
means.columns.values,
means.index.values,
means.values,
cmap=cmap,
levels=levels,
extend='both',
linewidths=2,
linestyles='dashed',
)
# ax.clabel(contours, contours.levels, inline=True, fontsize=10, colors='k')
cax = fig.add_axes([1, 0.08, 0.03, 0.87])
cbar = plt.colorbar(
contours,
cax=cax,
fraction=0.046,
pad=0.04,
extend='both',
spacing='proportional',
format=FormatStrFormatter("%.2f")
)
cbar.ax.set_ylabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$'
rf'($\lambda = {int(wavelength*1000)}~nm$)'
)
cbar.ax.set_title(r'$\lambda$')
cbar.ax.yaxis.set_ticks_position('right')
cbar.ax.yaxis.set_label_position('left')
ax.set_xlabel(f'Peak signal-to-noise ratio')
ax.set_xscale('log')
ax.xaxis.set_major_formatter(FormatStrFormatter("%d"))
ax.xaxis.set_minor_formatter(FormatStrFormatter("%d"))
ax.set_xlim(10 ** 0, 10 ** 2)
ax.grid(True, which="both", axis='both', lw=.25, ls='--', zorder=0)
if 'amplitude' in label:
ax.set_ylabel(
'Peak-to-peak aberration $|P_{95} - P_{5}|$'
rf'($\lambda = {int(wavelength*1000)}~nm$)'
)
ax.set_yticks(np.arange(0, 11, .5), minor=True)
ax.set_yticks(np.arange(0, 11, 1))
ax.set_ylim(.25, 10)
else:
ax.set_ylabel(f"{label.replace('_', ' ').capitalize()} ($\mu m$)")
plt.tight_layout()
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def prediction(
psf: np.ndarray,
predicted_psf: np.ndarray,
pred: Wavefront,
dm_before: np.ndarray,
dm_after: np.ndarray,
save_path: Path,
wavelength: float = .605,
psf_cmap: str = 'hot',
gamma: float = .5,
threshold: float = .01,
):
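# Diagnostic figure layout: three rows of PSF slices (input max-projection,
# input mid-plane, prediction), DM actuator voltages before/after, the
# predicted wavefront with a histogram of its phase values, and the predicted
# Zernike amplitudes per mode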
def wavefront(iax, phi, levels):
mat = iax.contourf(
phi,
levels=levels,
cmap=wave_cmap,
vmin=np.min(levels),
vmax=np.max(levels),
extend='both'
)
divider = make_axes_locatable(iax)
top = divider.append_axes("top", size='30%', pad=0.2)
top.hist(phi.flatten(), bins=phi.shape[0], color='grey')
top.set_yticks([])
top.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
top.spines['right'].set_visible(False)
top.spines['top'].set_visible(False)
top.spines['left'].set_visible(False)
iax.axis('off')
return mat
def psf_slice(xy, zx, zy, vol, label='', maxx=True):
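# Gamma-compress the volume (gamma < 1) so dim side lobes stay visible next to the bright core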
vol = vol ** gamma
vol = np.nan_to_num(vol)
if maxx:
m = xy.imshow(np.max(vol, axis=0), cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(np.max(vol, axis=1), cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(np.max(vol, axis=2), cmap=psf_cmap, vmin=0, vmax=1)
else:
mid_plane = vol.shape[0] // 2
m = xy.imshow(vol[mid_plane, :, :], cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(vol[:, mid_plane, :], cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(vol[:, :, mid_plane], cmap=psf_cmap, vmin=0, vmax=1)
cax = inset_axes(zy, width="10%", height="100%", loc='center right', borderpad=-3)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
cax.set_ylabel(f"$\gamma$: {gamma:.2f}")
cax.yaxis.set_label_position("right")
xy.set_ylabel(label)
return m
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'axes.autolimit_mode': 'round_numbers'
})
if len(psf.shape) > 3:
psf = np.squeeze(psf, axis=-1)
psf = np.squeeze(psf, axis=0)
pred_wave = pred.wave(size=257)
fig = plt.figure(figsize=(8, 11))
gs = fig.add_gridspec(7, 3)
ax_ixy = fig.add_subplot(gs[0, 0])
ax_ixz = fig.add_subplot(gs[0, 1])
ax_iyz = fig.add_subplot(gs[0, 2])
ax_mxy = fig.add_subplot(gs[1, 0])
ax_mxz = fig.add_subplot(gs[1, 1])
ax_myz = fig.add_subplot(gs[1, 2])
ax_pxy = fig.add_subplot(gs[2, 0])
ax_pxz = fig.add_subplot(gs[2, 1])
ax_pyz = fig.add_subplot(gs[2, 2])
ax_acts = fig.add_subplot(gs[3:5, :2])
ax_wavefront = fig.add_subplot(gs[3:5, 2])
cax = inset_axes(ax_wavefront, width="10%", height="100%", loc='center right', borderpad=-4)
ax_zcoff = fig.add_subplot(gs[-2:, :])
step = .25
vmax = round(np.max([
np.abs(round(np.nanquantile(pred_wave, .1), 2)),
np.abs(round(np.nanquantile(pred_wave, .9), 2))
]) * 4) / 4
vmax = .25 if vmax < threshold else vmax
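# Three-band wavefront colormap: earth tones for negative phase, a grey band
# around zero (the [-0.15, 0.15] neutral zone in mm), and magma for positive
# phase; mticks spaces the contour levels every `step` waves outside that band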
highcmap = plt.get_cmap('magma_r', 256)
middlemap = plt.get_cmap('gist_gray', 256)
lowcmap = plt.get_cmap('gist_earth_r', 256)
ll = np.arange(-vmax, -.25 + step, step)
mm = [-.15, 0, .15]
hh = np.arange(.25, vmax + step, step)
mticks = np.concatenate((ll, mm, hh))
levels = np.vstack((
lowcmap(.66 * ll / ll.min()),
middlemap([.85, .95, 1, .95, .85]),
highcmap(.66 * hh / hh.max())
))
wave_cmap = mcolors.ListedColormap(levels)
ax_ixy.set_title('XY')
ax_ixz.set_title('ZX')
ax_iyz.set_title('ZY')
psf_slice(ax_ixy, ax_ixz, ax_iyz, psf, label=r'Input (max)', maxx=True)
psf_slice(ax_mxy, ax_mxz, ax_myz, psf, label=r'Input (middle)', maxx=False)
psf_slice(ax_pxy, ax_pxz, ax_pyz, predicted_psf, label='Predicted')
mat = wavefront(ax_wavefront, pred_wave, levels=mticks)
cbar = fig.colorbar(
mat,
cax=cax,
fraction=0.046,
pad=0.04,
extend='both',
format=FormatStrFormatter("%.2g"),
# spacing='proportional',
)
cbar.ax.set_title(r'$\lambda$')
cbar.ax.set_ylabel(rf'$\lambda = {wavelength}~\mu m$')
cbar.ax.yaxis.set_ticks_position('right')
cbar.ax.yaxis.set_label_position('left')
ax_acts.plot(dm_before, ls='--', color='C1', label='Current')
ax_acts.plot(dm_after, color='C0', label='Predictions')
ax_acts.set_xlim(0, 68)
ax_acts.set_ylabel('DM actuators (volts)')
ax_acts.legend(frameon=False, loc='lower right')
ax_acts.grid(True, which="both", axis='both', lw=1, ls='--', zorder=0)
ax_acts.axhline(0, ls='--', color='k', alpha=.5)
ax_zcoff.plot([], [], ' ', label=r"$\alpha$: ANSI index")
ax_zcoff.plot([], [], ' ', label=r"$j$: Noll index")
ax_zcoff.plot(pred.amplitudes_ansi_waves, color='dimgrey', label='Predictions')
ax_zcoff.legend(frameon=False, loc='lower left')
ax_zcoff.set_xticks(range(len(pred.amplitudes_ansi_waves)))
ax_zcoff.set_ylabel(rf'Amplitudes ($\lambda = {wavelength}~\mu m$)')
ax_zcoff.spines['top'].set_visible(False)
ax_zcoff.grid(True, which="both", axis='y', lw=1, ls='--', zorder=0)
xticks = [f"$\\alpha$={z.index_ansi}\n$j$={z.index_noll}\n$n$={z.n}\n$m$={z.m}" for z in pred.zernikes]
ax_zcoff.set_xticklabels(xticks)
ax_zcoff.set_xlim(0, 14)
ax_zcoff.axhline(0, ls='--', color='r', alpha=.5)
plt.subplots_adjust(top=0.95, right=0.95, wspace=.2)
pupil_displacement = np.array(pred_wave, dtype='float32')
imsave(f"{save_path}_pupil_displacement.tif", pupil_displacement)
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_models(df: pd.DataFrame, save_path, wavelength=.605, nsamples=100, label=''):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
fig, ax = plt.subplots(figsize=(8, 6))
for model in df.columns.values:
ax.plot(df[model], label=model)
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_yscale('log')
ax.set_ylim((0.01, .5))
ax.spines['top'].set_visible(False)
ax.set_xlabel(label)
ax.set_xscale('log', subsx=[2, 3, 4, 5, 6, 8])
ax.xaxis.set_major_formatter(FormatStrFormatter("%d"))
ax.xaxis.set_minor_formatter(FormatStrFormatter("%d"))
ax.set_xlim(10 ** 1, 10 ** 2)
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_yticks([.01, 0.02, .03, .05, .07, .15, .2, .3, .4], minor=True)
ax.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.tick_params(axis='x', which='major', pad=10)
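# Broken y-axis: residuals above 0.5 are drawn in a second panel stacked on
# top of the main axes (shared x), so the log-scaled detail below 0.5 stays readable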
divider = make_axes_locatable(ax)
axl = divider.append_axes("top", size=2.0, pad=0, sharex=ax)
for model in df.columns:
axl.plot(df[model], label=model)
axl.set_xscale('linear')
axl.set_ylim((0.5, 3))
axl.spines['bottom'].set_visible(False)
axl.xaxis.set_ticks_position('top')
axl.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
axl.grid(True, which="major", axis='both', lw=.5, ls='--', zorder=0)
axl.set_xscale('log', subsx=[2, 3, 4, 5, 6, 8])
axl.xaxis.set_major_formatter(FormatStrFormatter("%d"))
axl.xaxis.set_minor_formatter(FormatStrFormatter("%d"))
axl.tick_params(axis='x', which='major', pad=10)
axl.set_xlim(10 ** 1, 10 ** 2)
axl.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_ylabel('Peak-to-peak residuals')
axl.set_ylabel(rf'($n$ = {nsamples}; $\lambda = {wavelength}~\mu m$)')
ax.legend(frameon=False, loc='lower center', ncol=df.shape[1] // 2)
plt.tight_layout()
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_residuals_per_mode(df: pd.DataFrame, save_path, wavelength=.605, nsamples=100):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
order = pd.concat(
[
df.mean(axis=1).to_frame('mean'),
df.sum(axis=1).to_frame('sum'),
df.std(axis=1).to_frame('std'),
df.median(axis=1).to_frame('median'),
df
],
axis=1
)
order = order.groupby('model')[['mean', 'std', 'median', 'sum']].mean().sort_values('mean')
logger.info(order)
fig, axes = plt.subplots(nrows=order.shape[0], figsize=(df.shape[1] / 2, 20), sharex='all')
for i, (model, row) in enumerate(order.iterrows()):
axes[i].set_title(model)
g = sns.boxplot(
ax=axes[i],
data=df[df.model == model],
orient='v',
palette="Set3",
)
g.set(ylim=(0, 2))
axes[i].grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
axes[i].axhline(.25, color='r')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text = '\n'.join((
rf"$\mu={round(row['mean'], 4)}$",
rf"$\sigma={round(row['std'], 4)}$",
rf"$m={round(row['median'], 4)}$",
rf"$\Sigma={round(row['sum'], 4)}$",
))
axes[i].text(0.025, 0.95, text, transform=axes[i].transAxes, va='top', bbox=props)
axes[i].set_ylabel(
'Residuals\n'
rf'($n$ = {nsamples}; $\lambda = {wavelength}~\mu m$)'
)
plt.tight_layout()
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_convergence(df: pd.DataFrame, save_path, wavelength=.605, nsamples=100, psnr=30):
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
})
fig, ax = plt.subplots(figsize=(8, 6))
for model in df['model'].unique():
x = df[df['model'] == model]['niter'].values
y = df[df['model'] == model]['residuals'].values
ax.plot(x, y, label=model)
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_yscale('log')
ax.set_ylim((0.01, 10))
ax.set_xlim((0, df['niter'].nunique()))
ax.set_xticks(range(df['niter'].nunique()))
ax.spines['top'].set_visible(False)
ax.set_xlabel('Number of iterations')
ax.grid(True, which="both", axis='both', lw=.5, ls='--', zorder=0)
ax.set_yticks([
.01, 0.02, .03, .05, .07, .15,
.25, .5, .75, 1, 1.5,
2, 3, 4, 6, 8, 10
], minor=True)
ax.xaxis.set_major_formatter(FormatStrFormatter("%d"))
ax.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.tick_params(axis='x', which='major', pad=10)
#ax.set_title(f"PSNR: {psnr}, $n$ = {nsamples}")
ax.set_ylabel(rf'Average peak-to-peak residuals ($\lambda = {round(wavelength*1000)}~nm$)')
ax.legend(frameon=False, loc='lower center', ncol=4)
plt.tight_layout()
plt.savefig(f'{save_path}.pdf', bbox_inches='tight', pad_inches=.25)
plt.savefig(f'{save_path}.png', dpi=300, bbox_inches='tight', pad_inches=.25)
def plot_inputs(
n_modes=15,
x_voxel_size=.15,
y_voxel_size=.15,
z_voxel_size=.6,
psnr=100,
wavelength: float = .605,
psf_cmap: str = 'Spectral_r',
threshold: float = .01,
):
def wavefront(iax, phi, levels, label=''):
mat = iax.contourf(
phi,
levels=levels,
cmap=wave_cmap,
vmin=np.min(levels),
vmax=np.max(levels),
extend='both'
)
iax.set_aspect('equal')
divider = make_axes_locatable(iax)
top = divider.append_axes("top", size='30%', pad=0.2)
top.hist(phi.flatten(), bins=phi.shape[0], color='grey')
top.set_title(label)
top.set_yticks([])
top.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
top.spines['right'].set_visible(False)
top.spines['top'].set_visible(False)
top.spines['left'].set_visible(False)
return mat
def slice(xy, zx, zy, vol, label='', maxproj=True):
if vol.shape[-1] == 3:
m = xy.imshow(vol[:, :, 0], cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(vol[:, :, 1], cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(vol[:, :, 2], cmap=psf_cmap, vmin=0, vmax=1)
else:
#vol = vol ** gamma
#vol = np.nan_to_num(vol)
if maxproj:
m = xy.imshow(np.max(vol, axis=0), cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(np.max(vol, axis=1), cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(np.max(vol, axis=2), cmap=psf_cmap, vmin=0, vmax=1)
else:
mid_plane = vol.shape[0] // 2
m = xy.imshow(vol[mid_plane, :, :], cmap=psf_cmap, vmin=0, vmax=1)
zx.imshow(vol[:, mid_plane, :], cmap=psf_cmap, vmin=0, vmax=1)
zy.imshow(vol[:, :, mid_plane], cmap=psf_cmap, vmin=0, vmax=1)
cax = inset_axes(zy, width="10%", height="100%", loc='center right', borderpad=-3)
cb = plt.colorbar(m, cax=cax)
cax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
cax.yaxis.set_label_position("right")
xy.set_ylabel(label)
return m
plt.rcParams.update({
'font.size': 10,
'axes.titlesize': 12,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'axes.autolimit_mode': 'round_numbers'
})
#plt.style.use("dark_background")
for i in trange(5, n_modes):
phi = np.zeros(n_modes)
phi[i] = .05
w = Wavefront(phi, order='ansi')
y_wave = w.wave(size=100)
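# Synthesize one Zernike mode at a time (amplitude 0.05) with the SyntheticPSF
# generator below, then render the corresponding OTF, PSF, and wavefront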
gen = SyntheticPSF(
amplitude_ranges=(-1, 1),
n_modes=n_modes,
lam_detection=wavelength,
psf_shape=3*[32],
x_voxel_size=x_voxel_size,
y_voxel_size=y_voxel_size,
z_voxel_size=z_voxel_size,
snr=psnr,
max_jitter=0,
cpu_workers=-1,
)
inputs = gen.single_otf(w, zplanes=0, normed=True, noise=False)
psf = gen.single_psf(w, zplanes=0, normed=True, noise=False)
otf = np.fft.fftn(psf)
import numpy as np
from matplotlib import pyplot as plt
class NN:
def __init__(self, h1, seed):
np.random.seed(seed)
self.h1 = h1 # number of hidden units in the hidden layer
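# Data layout convention: X has shape (n_features, m) and Y has shape
# (n_outputs, m), i.e. one training example per column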
def init_params(self, X, Y):
W1 = np.random.randn(self.h1, X.shape[0]) * 0.01
b1 = np.zeros((self.h1, 1))
W2 = np.random.randn(Y.shape[0], self.h1) * 0.01
b2 = np.zeros((Y.shape[0],1))
params = {
'W1':W1,
'b1':b1,
'W2':W2,
'b2':b2
}
return params
def forward_pass(self, params, X):
W1 = params['W1']
b1 = params['b1']
W2 = params['W2']
b2 = params['b2']
Z1 = np.dot(W1, X) + b1 # Forward Propagation
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = 1/(1+(np.exp(-Z2)))
cache = {
'Z1':Z1,
'A1':A1,
'Z2':Z2,
'A2':A2
}
return cache
def compute_cost(self, cache, Y):
A2 = cache['A2']
m = Y.shape[1]
cost = (-1/m) * np.sum(Y*np.log(A2) + (1-Y)*np.log(1-A2)) # Computing Cost
return cost
def back_prop(self, cache, params, X, Y):
W2 = params['W2']
m = Y.shape[1]
A1 = cache['A1']
A2 = cache['A2']
# Backward propagation: compute the gradients of the cost w.r.t. each parameter
dZ2 = A2 - Y
dW2 = (1/m) * np.dot(dZ2, A1.transpose())
db2 = (1/m) * (np.sum(dZ2, axis=1, keepdims=True))
dZ1 = np.dot(W2.transpose(), dZ2) * (1 - (A1**2))  # (1 - A1**2) is the tanh derivative
dW1 = (1/m) * np.dot(dZ1, X.transpose())
db1 = (1/m) * (np.sum(dZ1, axis=1, keepdims=True))
grads = {'dW1':dW1, 'db1':db1, 'dW2':dW2, 'db2':db2}
return grads
def optimize(self, X, Y, grads, params, alpha):
W1 = params['W1']
b1 = params['b1']
W2 = params['W2']
b2 = params['b2']
dW1 = grads['dW1']
db1 = grads['db1']
dW2 = grads['dW2']
db2 = grads['db2']
W1 = W1 - alpha*dW1 # Gradient Descent
b1 = b1 - alpha*db1
W2 = W2 - alpha*dW2
b2 = b2 - alpha*db2
params = {'W1':W1,'b1':b1,'W2':W2,'b2':b2}
return params
def fit(self, X, Y, alpha=0.01, epochs=10000, plot_cost=False, verbose=True):
params = self.init_params(X, Y)
cost = []
iterations = []
c = 0
for i in range(epochs):
cache = self.forward_pass(params, X)
if plot_cost and ((i+1)%1000 == 0):
cost.append(self.compute_cost(cache, Y))
iterations.append(i+1)
if verbose and ((i+1)%1000 == 0):
c = self.compute_cost(cache, Y)
print("Cost after {} iterations is: {}".format((i+1), c))
grads = self.back_prop(cache, params, X, Y)
params = self.optimize(X, Y, grads, params, alpha)
if plot_cost:
plt.plot(iterations, cost)
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.title("Cost vs. Iterations")
plt.show()
return params, cost, iterations
else:
return params
def predict(self, X, params):
W1 = params['W1']
b1 = params['b1']
W2 = params['W2']
b2 = params['b2']
Z1 = np.dot(W1, X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = 1/(1+(np.exp(-Z2)))
return A2
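# Minimal usage sketch for the NN class above: XOR is used here only as an
# illustrative toy task, and the hyperparameters (h1=4, alpha=0.5, 10000
# epochs) are assumptions, not tuned values
if __name__ == '__main__':
    X = np.array([[0, 0, 1, 1],
                  [0, 1, 0, 1]])   # shape (2, 4): 2 features, 4 examples (one per column)
    Y = np.array([[0, 1, 1, 0]])   # shape (1, 4): XOR labels
    nn = NN(h1=4, seed=2)
    params = nn.fit(X, Y, alpha=0.5, epochs=10000, verbose=False)
    # typically converges to [[0. 1. 1. 0.]] after thresholding the sigmoid output
    print(np.round(nn.predict(X, params)))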
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
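# Pattern used throughout these helpers: run the operation on the pandas frame
# first; if pandas raises, assert that modin raises the same exception type,
# otherwise assert that both results are equal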
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but keeping it here avoids confusing list-comprehension
# logic in the pytest.mark.parametrize decorator
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This repeats the check above, but is simpler than adding None to the parametrize list
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This repeats the check above, but is simpler than adding None to the parametrize list
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas. Because there is no true guarantee which one will be first,
            # if they don't match, make sure that the `freq` is the same at least.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
            modin_result = modin_df.dot(np.arange(col_len + 10))
import codecs
import os
import random
import pickle
import sys
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from transformers import BertTokenizer, TFBertModel
from io_utils.io_utils import load_data
from data_processing.feature_extraction import calc_features
from data_processing.feature_extraction import calc_features_and_labels
def main():
random.seed(42)
np.random.seed(42)
tf.random.set_seed(42)
if len(sys.argv) < 2:
err_msg = 'The source file is not specified!'
raise ValueError(err_msg)
src_fname = os.path.normpath(sys.argv[1])
if len(sys.argv) < 3:
err_msg = 'The BERT model name is not specified!'
raise ValueError(err_msg)
bert_path = os.path.normpath(sys.argv[2])
if len(sys.argv) < 4:
err_msg = 'The destination file with features is not specified!'
raise ValueError(err_msg)
dst_fname = os.path.normpath(sys.argv[3])
if len(sys.argv) < 5:
err_msg = 'The source data kind is not specified! ' \
'Possible values: text, annotation.'
raise ValueError(err_msg)
source_data_kind = sys.argv[4].strip().lower()
if source_data_kind not in {'text', 'annotation'}:
        err_msg = f'"{sys.argv[4]}" is a wrong source data kind! ' \
                  'Possible values: text, annotation.'
raise ValueError(err_msg)
if len(sys.argv) < 6:
err_msg = 'The maximal sentence length is not specified!'
raise ValueError(err_msg)
try:
max_len = int(sys.argv[5])
    except ValueError:
max_len = 0
if max_len <= 0:
err_msg = f'The maximal sentence length = {sys.argv[5]} ' \
f'is inadmissible!'
raise ValueError(err_msg)
if source_data_kind == 'annotation':
if len(sys.argv) < 7:
err_msg = 'The named entity vocabulary is not specified!'
raise ValueError(err_msg)
ne_voc_fname = os.path.normpath(sys.argv[6])
if not os.path.isfile(ne_voc_fname):
err_msg = f'The file "{ne_voc_fname}" does not exist!'
raise IOError(err_msg)
with codecs.open(ne_voc_fname, mode='r', encoding='utf-8') as fp:
named_entity_list = list(filter(
lambda it2: len(it2) > 0,
map(lambda it1: it1.strip(), fp.readlines())
))
if len(named_entity_list) < 1:
raise ValueError(f'The file "{ne_voc_fname}" is empty!')
else:
named_entity_list = []
if not os.path.isfile(src_fname):
err_msg = f'The file "{src_fname}" does not exist!'
raise IOError(err_msg)
if len(dst_fname.strip()) == 0:
raise ValueError('The destination file name is empty!')
dst_dir = os.path.dirname(dst_fname)
if len(dst_dir) > 0:
if not os.path.isdir(dst_dir):
err_msg = f'The directory "{dst_dir}" does not exist!'
raise IOError(err_msg)
bert_tokenizer = BertTokenizer.from_pretrained(bert_path)
bert_model = TFBertModel.from_pretrained(bert_path)
features = []
if source_data_kind == 'annotation':
labels = [[] for _ in range(len(named_entity_list))]
source_data = load_data(src_fname)
for cur_id in tqdm(sorted(list(source_data.keys()))):
text, ners = source_data[cur_id]
X, y = calc_features_and_labels(
bert_tokenizer,
bert_model,
max_len,
named_entity_list,
text, ners
)
features.append(X)
for idx in range(len(named_entity_list)):
labels[idx].append(y[idx])
features = np.vstack(features)
for idx in range(len(named_entity_list)):
        labels[idx] = np.vstack(labels[idx])
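# Example invocation (illustrative; the script and file names below are
# hypothetical - only the positional-argument order matches the checks above):
#   python extract_features.py data.json /models/bert-base feats.npy annotation 128 ne_vocab.txt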
from models import CNN5
from core.Optimizers import sgd, bgd
from core.Functions import one_hot_f
import numpy as np
from tensorflow import keras
from core.Dataloader import batch_iterator
import cupy as cp
def test(model, test_inputs, test_labels):
num_of_sample = test_inputs.shape[0]
cnt_correct, cnt_tot = 0, 0
if model.gpu_backend:
test_inputs = cp.array(test_inputs)
test_labels = cp.array(test_labels)
for i in range(num_of_sample):
test_input = test_inputs[i:i + 1]
test_label = test_labels[i]
res = model.forward_prop(test_input)
if cp.argmax(res) == cp.argmax(test_label):
cnt_correct += 1
cnt_tot += 1
else:
for i in range(num_of_sample):
test_input = test_inputs[i:i + 1]
test_label = test_labels[i]
res = model.forward_prop(test_input)
if np.argmax(res) == np.argmax(test_label):
cnt_correct += 1
cnt_tot += 1
acc = cnt_correct / cnt_tot
print('[ accuracy ]: ', acc * 100)
return acc
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = np.expand_dims(train_images / 255, axis=-1)
"""
BlackthornFeatures.py
Author: <NAME> (<EMAIL>)
"""
import h5py
import multiprocessing as mp
import numpy as np
import os
import random
import scipy.sparse as sparse
import time
from aimodel.commons import t
from data.ImagenetShuffleFeatureExtractor import ImagenetShuffleFeatureExtractor # noqa E501
class BlackthornFeatures:
"""
The features for the interactive learning models are based on Blackthorn,
they are very time-efficient (enabling real-time interactivity on even
large dataset) without sacrificing too much of accuracy.
Essentially operates in two modes. BlackthornFeatures instance is used in
the live system - it loads the features into the system, and instance-level
methods properly handle them. Class-level methods are used in dataset
processing, i.e., compressing the extracted features into a sparse
representation.
"""
N_SUBCHUNKS_PER_PROCESS = 4
DEFAULT_N_PROCESSES = 1
DEFAULT_N_FEAT_PER_IMG = 50
def __init__(self, features_path):
"""
Constructor.
Parameters
----------
features_path : str
The path to where the compressed features are located.
"""
self.features = sparse.load_npz(features_path)
self.n = self.features.shape[0]
self.n_feat = self.features.shape[1]
self.rand_candidates_all = set(range(self.n))
def get(self, idx, n_random_fill=0):
"""
Fetches the compressed features corresponding to the given row indices.
Parameters
----------
idx : list
The list of requested row (image) indices.
n_random_fill : int
The number of images to be randomly sampled in addition to the
features specified in idx. Default: 0.
Returns
-------
np.array
A 2-D array with rows corresponding to the requested image indices,
and columns being the features.
"""
if n_random_fill > 0:
random_candidates = self.rand_candidates_all - set(idx)
idx += random.sample(random_candidates, n_random_fill)
return self.features[idx, :]
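    # Usage sketch (illustrative): BlackthornFeatures(path).get([3, 8],
    # n_random_fill=2) returns a 4-row sparse matrix - the two requested rows
    # plus two distinct randomly sampled ones.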
def all(self):
"""
Returns the complete feature matrix.
Returns
-------
np.array
A 2-D array with rows being the images and columns the features.
"""
return self.features
@classmethod
def compress(cls, dataset_config):
"""
Given the dataset config, takes the concept feature representation of
a dataset and compresses the features into a sparse matrix.
Parameters
----------
dataset_config : dict
A valid dataset config (see the README for formatting specs)
"""
# Dataset config shortcuts
root_dir = dataset_config["root_dir"]
il_raw_features_path =\
os.path.join(root_dir, dataset_config["il_raw_features_path"])
il_features_path =\
os.path.join(root_dir, dataset_config["il_features_path"])
n_processes = dataset_config["il_n_processes"]
n_feat_comp = dataset_config["il_n_feat_per_image"]
# First, validate the inputs (establishing data dims in the process)
n, n_feat, process_chunks =\
cls._prepare_compression(il_raw_features_path,
il_features_path, n_processes)
print("%s +++ COMPRESSING +++" % t())
stopwatch = time.time()
# Start the workers that perform the compression
processes = [None for x in range(n_processes)]
for p_id in range(n_processes):
worker_args = (p_id, n_processes,
il_raw_features_path, il_features_path,
n, n_feat, n_feat_comp, process_chunks)
processes[p_id] =\
mp.Process(target=cls._compress_worker,
args=worker_args)
processes[p_id].start()
for p_id in range(n_processes):
processes[p_id].join()
# Merge the compressed features into a single matrix, iterating over
# all feature selection-n compressed features combinations
worker_comp_features_all = []
for p_id in range(n_processes):
# Append the worker features to the list
worker_comp_features_path =\
cls._worker_comp_features_path(p_id, il_features_path)
worker_comp_features = sparse.load_npz(worker_comp_features_path)
worker_comp_features_all.append(worker_comp_features)
# Mop up worker feature files
os.remove(worker_comp_features_path)
comp_features = sparse.vstack(worker_comp_features_all)
# Sanity check - the compressed features should have the same
# dimensions as the original ones
if comp_features.shape != (n, n_feat):
err = (("Compression sanity check failed: the compressed feature "
"matrix dimensions (%sx%s) do not match the original ones "
"(%s x %s).")
% (comp_features.shape[0], comp_features.shape[1],
n, n_feat))
raise BlackthornFeaturesError(err)
# Write the feature file
sparse.save_npz(il_features_path, comp_features)
print("%s +++ COMPRESSION COMPLETE (%s s) +++"
% (t(), round(time.time() - stopwatch, 2)))
@classmethod
def _compress_worker(cls,
p_id, n_processes,
il_raw_features_path, il_features_path,
n, n_feat, n_feat_comp, process_chunks):
"""
The worker method that performs the actual compression on its data
        chunks (split further into subchunks to avoid memory issues, as data
        is copied in the process).
Parameters
----------
p_id : int
The process ID.
n_processes : int
The total number of worker processes.
il_raw_features_path : str
The absolute path to the uncompressed (raw) features.
il_features_path : str
The path where the compressed features will be stored.
n : int
The number of images in the collection.
n_feat : int
The number of features.
n_feat_comp : int
The number of features to be preserved (compressed), each one
beyond this number in the top-features-by-value ranking will be set
to 0.
process_chunks : dict
The data chunks as computed by the _prepare_compression()
method
"""
# We need to split chunks into subchunks due to the data being copied
n_subchunks = cls.N_SUBCHUNKS_PER_PROCESS * n_processes
# Prepare the feature container
worker_comp_features = sparse.csr_matrix((0, n_feat))
# Go over the data chunks
for chunk in process_chunks[p_id]:
n_subchunk = (chunk["i_end"] - chunk["i_start"]) // n_subchunks
# Open the dataset
with h5py.File(il_raw_features_path, "r") as feat_f:
features = feat_f[chunk["feat_submat"]]
for subchunk in range(n_subchunks):
i_start_sc = chunk["i_start"] + subchunk * n_subchunk
if subchunk == n_subchunks - 1:
i_end_sc = chunk["i_end"]
else:
i_end_sc =\
chunk["i_start"] + (subchunk + 1) * n_subchunk
n_sc = i_end_sc - i_start_sc
# Establish the feature submatrix
X = np.copy(features[i_start_sc: i_end_sc, :])
# Establish the top feature indices.
feat_argsort = np.argsort(-X)
# Trim the argsort to the indices of actually kept
# features. This is now a 2-D matrix with each row
# corresponding to the indices of the top features
# kept for each image.
top_feat_idx =\
np.copy(feat_argsort[:, :n_feat_comp])
# Initialize the nullifier matrix, which will be
# used to set the NOT kept features to zero. The
# nullifier dimensions are equal to the chunk
# feature matrix dimensions, each item is 1 if the
# feature is kept and 0 if the feature is not kept.
# We initialize to all 0s
                    nullifier = np.zeros((n_sc, n_feat))
# environment setting
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
import warnings
warnings.filterwarnings('ignore')
# external packages
import numpy as np
import torch
import dgl  # needed below for dgl.seed() and the graphs built by BuildGraph
import json
import copy
import time
import torch.nn.functional as F
from torch.distributions import MultivariateNormal
from torch.nn import DataParallel
from tqdm import tqdm
# self writing files
import constants
import networks
from reward_fun import reward_fun
from simulator import disease_model
from util import *
class GraphRL(object):
def __init__(self,
MSA_name='Atlanta',
vaccine_day=200,
step_length=24,
num_seed=128,
num_episode=300,
num_epoch=10,
batch_size=512,
buffer_size=8192,
save_interval=1,
lr=1e-4,
soft_replace_rate=0.5,
gamma=0.6,
epsilon=0.2,
SPR_weight=2,
manual_seed=0):
super().__init__()
print('Initializing...')
# generate config
config_data=locals()
del config_data['self']
del config_data['__class__']
time_data=time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime())
config_data['time']=time_data
# environment
self.device='cuda' if torch.cuda.is_available() else 'cpu'
self.manual_seed=manual_seed
torch.manual_seed(self.manual_seed)
if self.device=='cuda':
torch.cuda.manual_seed(self.manual_seed)
np.random.seed(self.manual_seed)
dgl.seed(self.manual_seed)
# loading cbg data (for simulation)
self.MSA_name=MSA_name
self.data=load_data(self.MSA_name)
self.poi_areas=self.data['poi_areas']
self.poi_times=self.data['poi_times']
self.cbg_ages=self.data['cbg_ages']# cbg*23groups
self.cbg_sizes=self.data['cbg_sizes']
self.poi_cbg_visits_list=self.data['poi_cbg_visits_list']# time_length*poi*cbg
self.time_length=len(self.poi_cbg_visits_list)
self.day_length=int(self.time_length/24)
self.step_length=step_length
assert len(self.cbg_ages)==len(self.cbg_sizes)
assert len(self.cbg_ages)==self.poi_cbg_visits_list[0].shape[-1]
self.num_cbg=len(self.cbg_ages)
self.sum_population=np.sum(self.cbg_sizes)
assert len(self.poi_areas)==self.poi_cbg_visits_list[0].shape[0]
self.num_poi=len(self.poi_areas)
# simulator
self.num_seed=num_seed
self.batch_size=batch_size
self.seeds=range(self.num_seed)
self.simulator=disease_model.Model(starting_seed=self.seeds,num_seeds=self.num_seed)
self.simulator.init_exogenous_variables(poi_areas=self.poi_areas,
poi_dwell_time_correction_factors=self.data['poi_dwell_time_correction_factors'],
cbg_sizes=self.cbg_sizes,
poi_cbg_visits_list=self.poi_cbg_visits_list,
cbg_attack_rates_original = self.data['cbg_attack_rates_scaled'],
cbg_death_rates_original = self.data['cbg_death_rates_scaled'],
p_sick_at_t0=constants.parameters_dict[self.MSA_name][0],
home_beta=constants.parameters_dict[self.MSA_name][1],
poi_psi=constants.parameters_dict[self.MSA_name][2],
just_compute_r0=False,
latency_period=96, # 4 days
infectious_period=84, # 3.5 days
confirmation_rate=.1,
confirmation_lag=168, # 7 days
death_lag=432)
# loading community data
self.data_community=load_data_aggregate(self.MSA_name)
self.community_ages=self.data_community['community_ages']# cbg*23groups
self.community_sizes=self.data_community['community_sizes']
self.community_index=self.data_community['community_index']
self.poi_community_visits_list=self.data_community['poi_community_visits_list']# time_length*poi*cbg
assert len(self.community_ages)==len(self.community_sizes)
assert len(self.community_ages)==self.poi_community_visits_list[0].shape[-1]
assert len(self.community_index)==self.num_cbg
self.num_community=len(self.community_ages)
# dynamic features
self.community_state_num=np.zeros((self.num_seed,3,self.num_community))#S,C,D
self.community_state_record=np.zeros((self.num_seed,self.step_length,3,self.num_community))
self.community_casediff=np.zeros((self.num_seed,3,self.num_community))
self.cbg_case_num=np.zeros((self.num_seed,self.num_cbg))
self.cbg_case_num_old=np.zeros((self.num_seed,self.num_cbg))
# static features (normalized)
self.community_ages_norm=array_norm(self.community_ages,clip=100)# cbg*23
self.poi_areas_norm=array_norm(self.poi_areas,clip=99)[:,np.newaxis]# poi*1
self.poi_times_norm=array_norm(self.poi_times,clip=99)[:,np.newaxis]# poi*1
self.community_ages_norm=torch.FloatTensor(self.community_ages_norm).to(self.device)
self.poi_areas_norm=torch.FloatTensor(self.poi_areas_norm).to(self.device)
self.poi_times_norm=torch.FloatTensor(self.poi_times_norm).to(self.device)
self.community_ages_norm_repeated_batch=self.community_ages_norm.repeat(self.batch_size,1,1)
self.poi_areas_norm_repeated_batch=self.poi_areas_norm.repeat(self.batch_size,1,1)
self.poi_times_norm_repeated_batch=self.poi_times_norm.repeat(self.batch_size,1,1)
self.community_ages_norm_repeated_seed=self.community_ages_norm.repeat(self.num_seed,1,1)
self.poi_areas_norm_repeated_seed=self.poi_areas_norm.repeat(self.num_seed,1,1)
self.poi_times_norm_repeated_seed=self.poi_times_norm.repeat(self.num_seed,1,1)
# network features (normalized)
poi_community_visits_day=list()
for i in range(self.day_length):
poi_community_visits_day.append(np.sum(self.poi_community_visits_list[i*24:i*24+24]))
self.poi_visit_day=list()
for i in range(len(poi_community_visits_day)):
self.poi_visit_day.append(np.sum(poi_community_visits_day[i],axis=1))
self.poi_visit_day=np.array(self.poi_visit_day)
self.poi_visit_day_norm=array_norm(self.poi_visit_day,clip=100)
self.poi_visit_day_norm=torch.FloatTensor(self.poi_visit_day_norm).to(self.device)
poi_community_visits_day_norm=sparse_mat_list_norm(poi_community_visits_day,clip=99)
self.poi_community_visits_day_network=list()
print('Building graphs...')
for i in tqdm(range(len(poi_community_visits_day_norm))):
net,edge_weights=BuildGraph(poi_community_visits_day_norm[i])
self.poi_community_visits_day_network.append((net.to(self.device),edge_weights.to(self.device)))
# vaccine number
self.vacine_day=int((vaccine_day/37367)*self.sum_population)
# replay buffers
self.buffer_size=buffer_size
self.buffer_pointer=0
self.buffer_sa=np.zeros((self.buffer_size,self.step_length,3,self.num_community))
self.buffer_sadiff=np.zeros((self.buffer_size,3,self.num_community))
self.buffer_sb=np.zeros((self.buffer_size),dtype=int)
self.buffer_a=np.zeros((self.buffer_size,self.num_community))
self.buffer_r=np.zeros(self.buffer_size)
self.buffer_sa1=np.zeros((self.buffer_size,self.step_length,3,self.num_community))
self.buffer_sadiff1=np.zeros((self.buffer_size,3,self.num_community))
self.buffer_sb1=np.zeros((self.buffer_size),dtype=int)
self.buffer_logp=np.zeros(self.buffer_size)
# training trackors
self.num_episode=num_episode
self.num_epoch=num_epoch
self.save_interval=save_interval
self.episode_deaths_trackor=list()
self.episode_cases_trackor=list()
self.critic_loss_trackor=list()
self.actor_loss_trackor=list()
self.SPR_loss_trackor=list()
# networks
self.online_encoder=DataParallel(networks.Encoder(step_length=self.step_length).to(self.device))
self.online_GCN=networks.GCN().to(self.device)
self.online_projector=DataParallel(networks.Projector(num_cbg=self.num_community).to(self.device))
self.transition=DataParallel(networks.Transition().to(self.device))
self.predictor=DataParallel(networks.Predictor().to(self.device))
self.PPO_actor=DataParallel(networks.Actor(num_cbg=self.num_community).to(self.device))
self.PPO_critic=DataParallel(networks.Critic(num_cbg=self.num_community).to(self.device))
self.target_encoder=copy.deepcopy(self.online_encoder)
self.target_encoder.eval()
self.target_GCN=copy.deepcopy(self.online_GCN)
self.target_GCN.eval()
self.target_projector=copy.deepcopy(self.online_projector)
self.target_projector.eval()
self.soft_replace_rate=soft_replace_rate
self.gamma=gamma
self.epsilon=epsilon
self.SPR_weight=SPR_weight
# optimizers
self.lr=lr
self.online_encoder_opt=torch.optim.Adam(self.online_encoder.parameters(),lr=self.lr)
self.online_GCN_opt=torch.optim.Adam(self.online_GCN.parameters(),lr=self.lr)
self.transition_opt=torch.optim.Adam(self.transition.parameters(),lr=self.lr)
self.online_projector_opt=torch.optim.Adam(self.online_projector.parameters(),lr=self.lr)
self.predictor_opt=torch.optim.Adam(self.predictor.parameters(),lr=self.lr)
self.PPO_actor_opt=torch.optim.Adam(self.PPO_actor.parameters(),lr=self.lr)
self.PPO_critic_opt=torch.optim.Adam(self.PPO_critic.parameters(),lr=self.lr)
print(f'Training platform on {self.MSA_name} initialized')
print(f'Number of communities={self.num_community}')
print(f'Number of POIs={self.num_poi}')
print(f'Total population={self.sum_population}')
print(f'Time length={self.time_length}')
print(f'Train with {self.num_seed} random seeds')
# making output directory
self.output_dir=os.path.join('..','model',f'{self.MSA_name}_aggregate_{self.num_seed}seeds_{time_data}')
os.mkdir(self.output_dir)
with open(os.path.join(self.output_dir,'config.json'),'w') as f:
json.dump(config_data,f)
def test_simulation(self):
for num in range(1):
self.simulator.reset_random_seed()
self.simulator.init_endogenous_variables()
# mat=500*np.ones((self.num_seed,self.num_cbg))
# mat[:30,:]-=300
# self.simulator.add_vaccine(mat)
for i in range(63):
# if i==20:
# mat=500*np.ones((self.num_seed,self.num_cbg))
# self.simulator.add_vaccine(mat)
self.simulator.simulate_disease_spread(no_print=True)
T1,L_1,I_1,R_1,C2,D2,total_affected, history_C2, history_D2, total_affected_each_cbg=self.simulator.output_record(full=True)
gt_result_root=os.path.join('..','model','simulator_test')
if not os.path.exists(gt_result_root):
os.mkdir(gt_result_root)
savepath = os.path.join(gt_result_root, f'cases_cbg_no_vaccination_{self.MSA_name}_{self.num_seed}seeds_step_raw{num}a.npy')
np.save(savepath, history_C2)
savepath = os.path.join(gt_result_root, f'deaths_cbg_no_vaccination_{self.MSA_name}_{self.num_seed}seeds_step_raw{num}a.npy')
np.save(savepath, history_D2)
def test_network(self):
community_state_record=torch.FloatTensor(self.community_state_record).to(self.device)
print(community_state_record.shape)
community_statediff=community_state_record[:,-1,:,:].unsqueeze(1)
print(community_statediff.shape)
index=[0]*self.num_seed
g,edge_weight,poi_visits=self.get_indexed_vectors(index)
cbg_encode,poi_encode=self.online_encoder(community_state_record,community_statediff,self.community_ages_norm_repeated_seed,poi_visits,self.poi_areas_norm_repeated_seed,self.poi_times_norm_repeated_seed)
print(cbg_encode.shape)
print(poi_encode.shape)
cbg_embeddings=self.online_GCN(g,edge_weight,cbg_encode,poi_encode)
print(cbg_embeddings.shape)
mu,sigma=self.PPO_actor(cbg_embeddings)
value=self.PPO_critic(cbg_embeddings)
print(mu.shape)
print(sigma.shape)
print(value.shape)
        action,_=self.get_action(mu,sigma)  # (mu, sigma) order per the get_action signature
print(action.shape)
cbg_embeddings_new=self.transition(cbg_embeddings,action)
print(cbg_embeddings_new.shape)
projection1=self.online_projector(cbg_embeddings_new)
projection2=self.online_projector(cbg_embeddings)
print(projection1.shape)
print(projection2.shape)
prediction=self.predictor(projection1)
print(prediction.shape)
def get_indexed_vectors(self,index):
g_list=list()
edge_weight_list=list()
for i in range(len(index)):
g,edge_weight=self.poi_community_visits_day_network[index[i]]
g_list.append(g)
edge_weight_list.append(edge_weight)
poi_visits=self.poi_visit_day_norm[index,:]
return g_list,edge_weight_list,poi_visits
def update_community_state(self,current_C,current_D):
self.community_state_record[:,:,1,:]=current_C
self.community_state_record[:,:,2,:]=current_D
self.community_state_record[:,:,0,:]=self.community_sizes-self.community_state_record[:,:,1,:]-self.community_state_record[:,:,2,:]
self.community_state_num=self.community_state_record[:,-1,:,:]
def get_action(self,mu,sigma,action=None):
batch_size=len(mu)
eye=torch.eye(self.num_community).to(self.device).repeat(batch_size,1,1)
sigma_mat=eye*(torch.square(sigma).unsqueeze(1))
dist = MultivariateNormal(mu, sigma_mat)
        if action is None:
action = dist.sample()
log_prob = dist.log_prob(action)
return action,log_prob
else:
log_prob = dist.log_prob(action)
return log_prob
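    # Note (illustrative): the covariance assembled above is diagonal
    # (eye * sigma^2 per batch element), so this is equivalent to sampling each
    # community's action from an independent Normal(mu_i, sigma_i) and summing
    # the per-dimension log-probabilities into one log_prob per sample.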
def get_vaccine(self,actions):
# actions=actions-2*(np.min(actions,axis=1)[:,np.newaxis])
actions=np.exp(actions)
actions=actions/(np.sum(actions,axis=1)[:,np.newaxis])
vaccine_mat_community=self.vacine_day*actions
pop=self.cbg_sizes
cases=self.cbg_case_num-self.cbg_case_num_old
vaccine_mat=np.empty((self.num_seed,self.num_cbg))
for i in range(self.num_community):
cases_community=cases[:,self.community_index==i]
cases_community_sum=np.sum(cases_community,axis=1)
proportion_cases_community=cases_community/cases_community_sum[:,np.newaxis]
pop_community=pop[self.community_index==i]
proportion_pop_community=pop_community/np.sum(pop_community)
proportion_cases_community[cases_community_sum==0,:]=proportion_pop_community
vaccine_mat[:,self.community_index==i]=vaccine_mat_community[:,i][:,np.newaxis]*proportion_cases_community
return vaccine_mat.astype(int)
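    # Allocation sketch (illustrative): the softmax above turns raw actions
    # into budget shares, e.g. actions = [[0.0, ln 3]] with a daily budget of
    # 100 doses splits 25/75 between two communities, before the
    # case-proportional (population-proportional when caseless) split within
    # each community.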
def SPR_loss_fun(self,input,target):
input_norm=torch.norm(input,dim=1)
target_norm=torch.norm(target,dim=1)
input_normed=input/input_norm.unsqueeze(1)
target_normed=target/target_norm.unsqueeze(1)
SPR_loss=-torch.mean(torch.sum(input_normed*target_normed,dim=1))
return SPR_loss
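    # Note (illustrative): SPR_loss_fun is the negative mean cosine similarity
    # of L2-normalized rows, so for 2-D (batch, feature) inputs two identical
    # tensors give the minimum loss of -1.0:
    #   x = torch.ones(4, 8); self.SPR_loss_fun(x, x)  # -> tensor(-1.)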
def update(self):
batch_count=0
critic_loss_sum=0
actor_loss_sum=0
SPR_loss_sum=0
self.buffer_r=(self.buffer_r - self.buffer_r.mean())/(self.buffer_r.std() + 1e-10)
sa=torch.FloatTensor(self.buffer_sa).to(self.device)
sadiff=torch.FloatTensor(self.buffer_sadiff).to(self.device).unsqueeze(1)
r=torch.FloatTensor(self.buffer_r).to(self.device)
a=torch.FloatTensor(self.buffer_a).to(self.device)
logp=torch.FloatTensor(self.buffer_logp).to(self.device)
sa1=torch.FloatTensor(self.buffer_sa1).to(self.device)
sadiff1=torch.FloatTensor(self.buffer_sadiff1).to(self.device).unsqueeze(1)
target_v=torch.FloatTensor(self.buffer_pointer,1).to(self.device)
advantage=torch.FloatTensor(self.buffer_pointer,1).to(self.device)
with torch.no_grad():
index_t=np.array(range(self.buffer_pointer))
batch_pointer_t=0
batch_num_t=int(self.buffer_pointer/self.batch_size)
for batch_t in range(batch_num_t):
batch_index_t=index_t[batch_pointer_t:batch_pointer_t+self.batch_size]
sa_batch_t=sa[batch_index_t]
sadiff_batch_t=sadiff[batch_index_t]
sb_batch_t=self.buffer_sb[batch_index_t]
r_batch_t=r[batch_index_t]
sa1_batch_t=sa1[batch_index_t]
sadiff1_batch_t=sadiff1[batch_index_t]
sb1_batch_t=self.buffer_sb1[batch_index_t]
g_batch_t,edge_weight_batch_t,poi_visits_batch_t=self.get_indexed_vectors(sb_batch_t)
community_encode_batch_t,poi_encode_batch_t=self.online_encoder(sa_batch_t,sadiff_batch_t,self.community_ages_norm_repeated_batch,poi_visits_batch_t,self.poi_areas_norm_repeated_batch,self.poi_times_norm_repeated_batch)
community_embeddings_batch_t=self.online_GCN(g_batch_t,edge_weight_batch_t,community_encode_batch_t,poi_encode_batch_t)
g1_batch_t,edge_weight1_batch_t,poi_visits1_batch_t=self.get_indexed_vectors(sb1_batch_t)
community_encode1_batch_t,poi_encode1_batch_t=self.online_encoder(sa1_batch_t,sadiff1_batch_t,self.community_ages_norm_repeated_batch,poi_visits1_batch_t,self.poi_areas_norm_repeated_batch,self.poi_times_norm_repeated_batch)
community_embeddings1_batch_t=self.online_GCN(g1_batch_t,edge_weight1_batch_t,community_encode1_batch_t,poi_encode1_batch_t)
target_v_batch_t=r_batch_t.unsqueeze(1)+self.gamma*self.PPO_critic(community_embeddings1_batch_t)
advantage_batch_t = (target_v_batch_t - self.PPO_critic(community_embeddings_batch_t))
target_v[batch_t*self.batch_size:batch_t*self.batch_size+self.batch_size]=target_v_batch_t
advantage[batch_t*self.batch_size:batch_t*self.batch_size+self.batch_size]=advantage_batch_t
for _ in range(self.num_epoch):
index=np.array(range(self.buffer_pointer))
            np.random.shuffle(index)
# ====================================================================
# Author : swc21
# Date : 2018-03-14 11:49:48
# Project : GitHub
# File Name : dist_bsize_cluster
# Last Modified by : swc21
# Last Modified time : 2018-03-15 19:59:16
# ====================================================================
#
# --[IMPORTS]---------------------------------------------------------------- #
import numpy as np
import time
from mpi4py import MPI
from random import shuffle
# --[PROGRAM-OPTIONS]-------------------------------------------------------- #
# box size params (Kpc)
box_min = 50
box_max = 300
box_step = 10
boxsizes = range(box_min, box_max, box_step)
# distance params (Mpc)
d_min = 75
d_max = 1101
d_step = (d_max-d_min)//len(boxsizes)  # integer step so range() below accepts it
distances = [i/100.0 for i in range(d_min, d_max, d_step)]
# MPI params
comm = MPI.COMM_WORLD
mpisize = comm.Get_size()
rank = comm.Get_rank()
# path params
load_path1 = '/root/SHARED/Halos1/'
load_path2 = '/root/SHARED/Halos2/'
load_path3 = '/root/SHARED/Halos3/'
load_local = load_path3 # '/root/Arrays/'
save_path = '/root/SHARED/Data_out/Arrays/'
# number of projections to make for each halo
n_projections = 10
# number of jobs to go before saving array
save_interval = 5
# array params
dataarray = np.zeros((len(distances), len(boxsizes), 26))
# --[FUNCTIONS]-------------------------------------------------------------- #
def rotation_matrix(ax, th):
'''
Input ax : list of two zeros and a one. ie. [0,1,0]
theta : radians to rotate about the axis axs
Output the rotation matrix to use np.dot() with
'''
axis = np.asarray(ax)
theta = np.asarray(th)
    axis = axis/(np.dot(ax, ax))**2  # note: assumes a unit-length axis; only basis vectors are passed in this script
a = np.cos(np.divide(theta, 2.0))
b, c, d = np.multiply(-axis, np.sin(np.divide(theta, 2.0)))
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return ([aa+bb-cc-dd, 2.0*(bc+ad), 2.0*(bd-ac)],
[2.0*(bc-ad), aa+cc-bb-dd, 2.0*(cd+ab)],
[2.0*(bd+ac), 2.0*(cd-ab), aa+dd-bb-cc])
def rotate(xyz, axs, theta):
'''
Input xyz : dictionary of np.arrays
axs : list of two zeros and a one. ie. [0,1,0]
theta : radians to rotate about the axis axs
Output numpy array of size three corresponding to the three new
position arrays
'''
rot_matrix = rotation_matrix(axs, theta)
try:
new_xyz = np.asarray(np.dot(rot_matrix, [xyz[0], xyz[1], xyz[2]]))
except MemoryError:
print(' [MemoryError caught]')
new_xyz = np.asarray(
np.dot(rot_matrix, [xyz[0][:100], xyz[1][:100], xyz[2][:100]]))
return [new_xyz[0], new_xyz[1], new_xyz[2]]
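# Minimal sanity check (illustrative only; defined but never called by this
# script): rotating the unit x-vector by pi/2 about the z-axis should land on
# the unit y-vector, up to floating-point error.
def _example_rotate():
    x = [np.array([1.0]), np.array([0.0]), np.array([0.0])]
    rx = rotate(x, [0, 0, 1], np.pi/2.0)
    assert np.allclose([rx[0][0], rx[1][0], rx[2][0]], [0.0, 1.0, 0.0])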
def kpc_box_at_distance(distance, kpc):
'''
Input distance in Mpc
Output square degrees
    ((206265.0*kpc/(distance*1e3))/3600.0)^2
'''
return np.square(np.divide(np.divide(206265.0*kpc, distance*1e3), 3600.0))
def calculate_nFov(kpc, d):
'''
Input distance in Mpc
Output number of FOV for a box covering _kpc_ radius at input distance
((square deg of box) / WFIRST square degree FOV)
'''
WFIRST_FOV = 0.79 * 0.43
n_fov = round(np.divide(kpc_box_at_distance(d, kpc), WFIRST_FOV), 2)
if n_fov < 1.0:
return int(1)
else:
return n_fov
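# Worked example (illustrative): a 100 kpc box at 1 Mpc subtends
# ((206265.0*100/(1.0*1e3))/3600.0)^2 ~= 32.83 square degrees, so
# calculate_nFov(100, 1.0) -> ~96.64 WFIRST pointings (32.83 / (0.79*0.43)).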
def calculate_app_mag(cutoff=26.15, t_fov=1000.0, t_exp=1000.0):
'''
Inputs are all defined
--> later on this can be used to measure differences from using
longer exposures, different total times and
different filters.
Outputs apparent magnitude
'''
return cutoff+2.5*np.log10(t_fov/t_exp)
def calculate_abs_mag(distance=1.0, app_mag=calculate_app_mag()):
'''
Input distance Mpc & apparent magnitude
Output Absolute Magnitude limit for given distance
'''
return app_mag-(5.0*np.log10(distance*1e5))
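# Worked example (illustrative): with the default apparent-magnitude limit of
# 26.15 (t_fov == t_exp), a star at 1 Mpc is only counted if brighter than
# 26.15 - 5.0*log10(1e5) = 26.15 - 25.0 = 1.15 in absolute magnitude.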
def prep_stars_1(
halo, keys, dlim,
lpl=load_local, lp1=load_path1,
lp2=load_path2, lp3=load_path3):
'''
Input halo : halo name as a string
keys : list of keys to load as strings
dlim : list of indices as int
lpl : load local paths as a string
lp1 : load path 1 as a string
lp2 : load path 2 as a string
lp3 : load path 3 as a string
Output a dictionary of numpy arrays
'''
if rank % 3 == 0:
path = lp1
elif rank % 7 == 0:
path = lp2
else:
path = lp3
if keys == ['px', 'py', 'pz']:
arrays = []
path = lpl
for key in keys:
arrays.append(np.load(path+str(halo)+'/'+str(key)+'.npy')[dlim])
return arrays
else:
return np.load(path+str(halo)+'/'+str(keys[0])+'.npy')[dlim]
def load_mags(halo, mag_filter='dcmc_ks', lpl=load_local):
'''
Input halo : halo name as a string
mag_filter : key for filter band to load as
string * must match galaxia name
lpl : load local path as a string
Output np.array of magnitudes
'''
return np.load(lpl+str(halo)+'/'+mag_filter+'.npy')
def find_lims(a, b, lim_, step_=box_step):
'''
Input a : numpy array acting as the X values
b : numpy array acting as the Y values
lim_ : box_size limit in Kpc
(the size of box on it's end)
step_ : interval to increase box_size Kpc
Output a list of indices for included stars
'''
step = step_/2.0
lm = lim_/2.0
alim_inr = np.logical_and(-lm < a, a < lm)
blim_inr = np.logical_and(-lm < b, b < lm)
inner_box = np.nonzero(np.logical_and(alim_inr, blim_inr))[0]
alim_otr = np.logical_and(-(lm+step) < a, a < (lm+step))
blim_otr = np.logical_and(-(lm+step) < b, b < (lm+step))
outter_box = np.nonzero(np.logical_and(alim_otr, blim_otr))[0]
return np.setdiff1d(outter_box, inner_box, assume_unique=True)
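# Worked example (illustrative, using the default 10 kpc step):
#   a = np.array([0.0, 28.0]); b = np.zeros(2)
#   find_lims(a, b, 50)  # -> array([1])
# Only the star at 28 kpc falls in the 25..30 kpc shell; the star at the
# origin sits inside the inner box and is excluded.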
def find_dlims(Ks, abs_m):
'''
returns list of indices's of stars visible within given range
'''
return np.nonzero(Ks < abs_m)[0]
def mix(lst, n):
'''
shuffles list n times, returns shuffled list as type list
'''
for i in range(n):
shuffle(lst)
return lst
# --[OPTIONS]---------------------------------------------------------------- #
# names for halo files
halos = ['halo02', 'halo05', 'halo07',
'halo08', 'halo09', 'halo10',
'halo12', 'halo14', 'halo15',
'halo17', 'halo20']
# misc print
line_ = ' --------------------------'
line = '\n'+line_+'\n'
line2_ = '#############################'
line2 = '\n'+line2_+line2_+'\n'
# job counter/ticker
n_jobs_done = 1
# rotation matrix axii
r_ax1 = [0, 0, 1]
r_ax2 = [0, 1, 0]
r_ax3 = [1, 0, 0]
# --[MAIN]------------------------------------------------------------------- #
# save initial empty array
np.save(save_path+str(rank)+'array.npy', dataarray)
# --[SETUP]------------------------------------------------------------------ #
# rank 0 creates work lists for each worker
if rank == 0:
jobs = [[k, j, distance, size] for k, distance in enumerate(
distances) for j, size in enumerate(boxsizes)]
mix(jobs, 2)
cmds = [jobs[i:i + len(jobs)//mpisize+1]
for i in range(0, len(jobs), (len(jobs)//mpisize)+1)]
mix(cmds, 2)
else:
cmds = None
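# Chunking sketch (illustrative): with, say, 500 jobs and mpisize = 8 workers
# the slice size is 500//8 + 1 = 63, so cmds holds 8 lists of at most 63
# [k, j, distance, size] commands - one list per rank for comm.scatter below.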
# opening message
if rank == 0:
print('\n', len(boxsizes), 'boxes and ', len(distances), 'distances')
print(len(cmds), 'command chunks')
print('jobs per chunk:')
tot = 0
for c in cmds:
n = len(c)
tot += n
print(' --> ', n)
print(tot, 'total jobs')
print(round(dataarray.nbytes/1e6, 2), 'Megabytes')
# scatter work
comm.Barrier()
work = comm.scatter(cmds, root=0)
n_jobs = len(work)*len(halos)
tot_work = len(work)
comm.Barrier()
time.sleep(rank*5)
print(line, ' [RANK '+str(rank)+' STARTING]', line)
# --[RUN]-------------------------------------------------------------------- #
for job in work:
# -------------------
# create empty lists
# running totals
n_stars = []
l_n_stars = []
n_trgb_stars = []
# mins
teff_min = []
age_min = []
feh_min = []
alpha_min = []
# maxes
teff_max = []
age_max = []
feh_max = []
alpha_max = []
# means
teff_mean = []
age_mean = []
feh_mean = []
alpha_mean = []
# -------------------
inner_mag = calculate_abs_mag(distance=job[2])
    outer_mag = calculate_abs_mag(distance=(job[2]+d_step))
# halo operations
for halo in halos[:2]:
# mag cuts for (d --> d+step)
dcmc_Ks = load_mags(halo)
inner_set = find_dlims(dcmc_Ks, inner_mag)
        outer_set = find_dlims(dcmc_Ks, outer_mag)
del dcmc_Ks
# make sure there are more visible stars closer than farther
        if len(inner_set) <= len(outer_set):
            print(line, '[RANK '+str(rank)+' MAG LIMIT PROBLEM]',
                  inner_mag, outer_mag, line)
# limits for magnitude cut at this distance
        dlim = np.setdiff1d(inner_set, outer_set, assume_unique=True)
# list of xyz arrays
positions = prep_stars_1(halo, ['px', 'py', 'pz'], dlim)
n_projections = 1
# projection loop
for rep in range(n_projections):
# three different rotation angles
theta1 = np.divide(np.multiply(
rep*np.random.randint(90)*13*rep, np.pi), 180.0)
theta2 = np.divide(np.multiply(
rep*np.random.randint(90)*4*rep, np.pi), 180.0)
theta3 = np.divide(np.multiply(
rep*np.random.randint(90)*7*rep, np.pi), 180.0)
            # rotate the coordinates three times, once about each axis
rot_1 = rotate(positions, r_ax1, theta1)
rot_2 = rotate(rot_1, r_ax2, theta2)
rot_3 = rotate(rot_2, r_ax3, theta3)
            # find the included indices of the rotated stars
lims = find_lims(rot_3[0], rot_3[1], job[3]).tolist()
########################
# [END OF CONFIGURATION]#
########################
# --------------------------------------------------- #
            # we made it all the way to the little square!       #
# now... collect all the data at this configuration #
# --------------------------------------------------- #
# number of stars for this projection
n_stars += [len(rot_3[0][lims])]
# log(number) of stars for this projection
l_n_stars += [np.log10(len(rot_3[0][lims]))]
            # effective temperature (stored as log10(T/K); converted to Kelvin here)
temp_array = np.power(10, prep_stars_1(halo, ['teff'], dlim)[lims])
teff_min += [np.min(temp_array)]
teff_mean += [np.mean(temp_array)]
teff_max += [np.max(temp_array)]
            # age (stored as log10(age/yr); converted to years here)
temp_array = np.power(10, prep_stars_1(halo, ['age'], dlim)[lims])
age_min += [np.min(temp_array)]
age_mean += [np.mean(temp_array)]
age_max += [np.max(temp_array)]
# [Fe/H]
temp_array = prep_stars_1(halo, ['feh'], dlim)[lims]
feh_min += [np.min(temp_array)]
feh_mean += [np.mean(temp_array)]
feh_max += [np.max(temp_array)]
# alpha abundances ([alpha/Fe])
temp_array = prep_stars_1(halo, ['alpha'], dlim)[lims]
alpha_min += [np.min(temp_array)]
alpha_mean += [np.mean(temp_array)]
alpha_max += [np.max(temp_array)]
            # tip of the red giant branch (TRGB) / red clump stars:
            # count stars whose current stellar mass exceeds the tip mass
            temp_array = prep_stars_1(halo, ['smass'], dlim)[lims]
            temp_array1 = prep_stars_1(halo, ['mtip'], dlim)[lims]
            red_giants = int(np.sum(temp_array > temp_array1))
            n_trgb_stars += [red_giants]
# ----------------------- #
# [END PROJECTION LOOP] #
# ----------------------- #
# <back to halo loop>
n_jobs -= 1
# ----------------- #
# [END HALO LOOP] #
# ----------------- #
# <back to job loop>
    # number of fields of view (FoV)
nfov = calculate_nFov(job[3], job[2])
    # time required (hours); renamed to avoid shadowing the imported time module
    t_hours = 1000.0 * nfov / 3600.0
# ---------------------------------------------------- #
# Load the data array with all the consolidated data #
# ---------------------------------------------------- #
    # Time (hours)
    dataarray[job[0], job[1], 0] = t_hours
# Number of Stars (log(n_stars))
minstars = np.min(l_n_stars)
maxstars = np.max(l_n_stars)
dataarray[job[0], job[1], 1] = maxstars-minstars
dataarray[job[0], job[1], 2] = maxstars
dataarray[job[0], job[1], 3] = np.mean(l_n_stars)
dataarray[job[0], job[1], 4] = minstars
# Effective Temperature (Kelvin)
dataarray[job[0], job[1], 5] = np.mean(teff_max)-np.mean(teff_min)
dataarray[job[0], job[1], 6] = np.mean(teff_max)
dataarray[job[0], job[1], 7] = np.mean(teff_mean)
dataarray[job[0], job[1], 8] = np.mean(teff_min)
# Age (Years)
dataarray[job[0], job[1], 9] = np.mean(age_max)-np.mean(age_min)
dataarray[job[0], job[1], 10] = np.mean(age_max)
dataarray[job[0], job[1], 11] = np.mean(age_mean)
dataarray[job[0], job[1], 12] = np.mean(age_min)
# Metallicity [Fe/H]
    dataarray[job[0], job[1], 13] = np.mean(feh_max) - np.mean(feh_min)
"""Tests for the policies in the hbaselines/goal_conditioned subdirectory."""
import unittest
import numpy as np
import tensorflow as tf
import os
from gym.spaces import Box
from hbaselines.utils.tf_util import get_trainable_vars
from hbaselines.goal_conditioned.td3 import GoalConditionedPolicy as \
TD3GoalConditionedPolicy
from hbaselines.goal_conditioned.sac import GoalConditionedPolicy as \
SACGoalConditionedPolicy
from hbaselines.algorithms.rl_algorithm import SAC_PARAMS
from hbaselines.algorithms.rl_algorithm import TD3_PARAMS
from hbaselines.algorithms.rl_algorithm import GOAL_CONDITIONED_PARAMS
class TestBaseGoalConditionedPolicy(unittest.TestCase):
"""Test GoalConditionedPolicy in hbaselines/goal_conditioned/base.py."""
def setUp(self):
self.policy_params = {
'sess': tf.compat.v1.Session(),
'ac_space': Box(low=-1, high=1, shape=(1,)),
'ob_space': Box(low=-2, high=2, shape=(2,)),
'co_space': Box(low=-3, high=3, shape=(2,)),
'verbose': 0,
'total_steps': 1,
}
self.policy_params.update(TD3_PARAMS.copy())
self.policy_params.update(GOAL_CONDITIONED_PARAMS.copy())
def tearDown(self):
self.policy_params['sess'].close()
del self.policy_params
# Clear the graph.
tf.compat.v1.reset_default_graph()
def test_store_transition(self):
"""Check the functionality of the store_transition() method.
This method is tested for the following cases:
1. hindsight = False, relative_goals = False
2. hindsight = False, relative_goals = True
3. hindsight = True, relative_goals = False
4. hindsight = True, relative_goals = True
        5. hindsight = False, relative_goals = False, meta_period = [5, 2]
"""
# =================================================================== #
# test case 1 #
# =================================================================== #
policy_params = self.policy_params.copy()
policy_params['relative_goals'] = False
policy_params['hindsight'] = False
policy_params['subgoal_testing_rate'] = 1
policy_params['meta_period'] = 4
policy_params['batch_size'] = 2
policy = TD3GoalConditionedPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
policy.meta_action = [np.array([5, 5])]
for i in range(4):
obs0 = np.array([i for _ in range(2)])
context0 = np.array([i for _ in range(3)])
action = np.array([i for _ in range(1)])
reward = i
obs1 = np.array([i+1 for _ in range(2)])
context1 = np.array([i for _ in range(3)])
done, is_final_step, evaluate = False, False, False
policy.store_transition(
obs0=obs0,
context0=context0,
action=action,
reward=reward,
obs1=obs1,
context1=context1,
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
obs_t = policy.replay_buffer._obs_t[0]
action_t = policy.replay_buffer._action_t[0]
reward = policy.replay_buffer._reward_t[0]
done = policy.replay_buffer._done_t[0]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward,
[[6], [-5.656854249501219, -4.24264068713107,
-2.8284271247638677, -1.4142135624084504]])
self.assertEqual(done,
[False, False, False, False])
def test_store_transition_2(self):
# =================================================================== #
# test case 2 #
# =================================================================== #
policy_params = self.policy_params.copy()
policy_params['relative_goals'] = True
policy_params['hindsight'] = False
policy_params['subgoal_testing_rate'] = 1
policy_params['meta_period'] = 4
policy_params['batch_size'] = 2
policy = TD3GoalConditionedPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
policy.meta_action = [np.array([5, 5])]
for i in range(4):
obs0 = np.array([i for _ in range(2)])
context0 = np.array([i for _ in range(3)])
action = np.array([i for _ in range(1)])
reward = i
obs1 = np.array([i+1 for _ in range(2)])
context1 = np.array([i for _ in range(3)])
done, is_final_step, evaluate = False, False, False
policy.store_transition(
obs0=obs0,
context0=context0,
action=action,
reward=reward,
obs1=obs1,
context1=context1,
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
obs_t = policy.replay_buffer._obs_t[0]
action_t = policy.replay_buffer._action_t[0]
reward = policy.replay_buffer._reward_t[0]
done = policy.replay_buffer._done_t[0]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([4, 4])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward,
[[6], [-5.656854249501219, -5.656854249501219,
-5.656854249501219, -5.656854249501219]])
self.assertEqual(done, [False, False, False, False])
def test_store_transition_3(self):
# =================================================================== #
# test case 3 #
# =================================================================== #
policy_params = self.policy_params.copy()
policy_params['relative_goals'] = False
policy_params['hindsight'] = True
policy_params['subgoal_testing_rate'] = 1
policy_params['meta_period'] = 4
policy_params['batch_size'] = 2
policy = TD3GoalConditionedPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
policy.meta_action = [np.array([5, 5])]
for i in range(4):
obs0 = np.array([i for _ in range(2)])
context0 = np.array([i for _ in range(3)])
action = np.array([i for _ in range(1)])
reward = i
obs1 = np.array([i+1 for _ in range(2)])
context1 = np.array([i for _ in range(3)])
done, is_final_step, evaluate = False, False, False
policy.store_transition(
obs0=obs0,
context0=context0,
action=action,
reward=reward,
obs1=obs1,
context1=context1,
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
# unchanged sample
obs_t = policy.replay_buffer._obs_t[0]
action_t = policy.replay_buffer._action_t[0]
reward_t = policy.replay_buffer._reward_t[0]
done_t = policy.replay_buffer._done_t[0]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward_t,
[[6], [-5.656854249501219, -4.24264068713107,
-2.8284271247638677, -1.4142135624084504]])
self.assertEqual(done_t, [False, False, False, False])
# hindsight sample
obs_t = policy.replay_buffer._obs_t[1]
action_t = policy.replay_buffer._action_t[1]
reward_t = policy.replay_buffer._reward_t[1]
done_t = policy.replay_buffer._done_t[1]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([4, 4]),
np.array([4, 4]),
np.array([4, 4]),
np.array([4, 4]),
np.array([4, 4])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward_t,
[[6], [-4.24264068713107, -2.8284271247638677,
-1.4142135624084504, -1e-05]])
self.assertEqual(done_t, [False, False, False, False])
def test_store_transition_4(self):
# =================================================================== #
# test case 4 #
# =================================================================== #
policy_params = self.policy_params.copy()
policy_params['relative_goals'] = True
policy_params['hindsight'] = True
policy_params['subgoal_testing_rate'] = 1
policy_params['meta_period'] = 4
policy_params['batch_size'] = 2
policy = TD3GoalConditionedPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
policy.meta_action = [np.array([5, 5])]
for i in range(4):
obs0 = np.array([i for _ in range(2)])
context0 = np.array([i for _ in range(3)])
action = np.array([i for _ in range(1)])
reward = i
obs1 = np.array([i+1 for _ in range(2)])
context1 = np.array([i for _ in range(3)])
done, is_final_step, evaluate = False, False, False
policy.store_transition(
obs0=obs0,
context0=context0,
action=action,
reward=reward,
obs1=obs1,
context1=context1,
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
# unchanged sample
obs_t = policy.replay_buffer._obs_t[0]
action_t = policy.replay_buffer._action_t[0]
reward = policy.replay_buffer._reward_t[0]
done = policy.replay_buffer._done_t[0]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([4, 4])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward,
[[6], [-5.656854249501219, -5.656854249501219,
-5.656854249501219, -5.656854249501219]])
self.assertEqual(done, [False, False, False, False])
# hindsight sample
obs_t = policy.replay_buffer._obs_t[1]
action_t = policy.replay_buffer._action_t[1]
reward_t = policy.replay_buffer._reward_t[1]
done_t = policy.replay_buffer._done_t[1]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([4, 4]),
np.array([3, 3]),
np.array([2, 2]),
np.array([1, 1]),
np.array([0, 0])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward_t,
[[6], [-4.24264068713107, -2.8284271247638677,
-1.4142135624084504, -1e-05]])
self.assertEqual(done_t, [False, False, False, False])
def test_store_transition_5(self):
# =================================================================== #
# test case 1 #
# =================================================================== #
policy_params = self.policy_params.copy()
policy_params['relative_goals'] = False
policy_params['hindsight'] = False
policy_params['subgoal_testing_rate'] = 1
policy_params['meta_period'] = [5, 2]
policy_params['num_levels'] = 3
policy_params['batch_size'] = 2
policy = TD3GoalConditionedPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
policy.meta_action = [[np.array([5, 5]), np.array([6, 6])]]
for i in range(10):
obs0 = np.array([i for _ in range(2)])
context0 = np.array([i for _ in range(3)])
action = np.array([i for _ in range(1)])
reward = i
obs1 = np.array([i+1 for _ in range(2)])
context1 = np.array([i for _ in range(3)])
done, is_final_step, evaluate = False, False, False
policy.store_transition(
obs0=obs0,
context0=context0,
action=action,
reward=reward,
obs1=obs1,
context1=context1,
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
obs_t = policy.replay_buffer._obs_t[0]
action_t = policy.replay_buffer._action_t[0]
reward = policy.replay_buffer._reward_t[0]
done = policy.replay_buffer._done_t[0]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4]),
np.array([5, 5]),
np.array([6, 6]),
np.array([7, 7]),
np.array([8, 8]),
np.array([9, 9]),
np.array([10, 10])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
                          np.array([5, 5]),
# -*- coding: utf-8 -*-
'''
This modules contains functions necessary for applying group OWL
to the parameters
'''
from __future__ import division, print_function, absolute_import
import sys
sys.path.append('./owl_projection')
import tensorflow as tf
import numpy as np
import utils_nn
from projectedOWL import proxOWL
from numpy.linalg import norm
from utils_retrain import group_averaging
import matplotlib.pyplot as plt
from utils_plot import hist_plot
def reg_params_init(sess, config):
'''
    This function initializes the regularization parameters.
Args:
sess: the predefined computation graph.
config: the yaml configuration file.
Returns:
        layer_owl_params: n-tuple, each element is an array containing the weights
of the corresponding layer.
'''
weight_placeholder = utils_nn.get_weight_placeholders()
layer_owl_params = []
min_num_row = float("Inf")
if config['PLD_transition'] == 0:
# read out the minimum number of rows
for idx, triple in enumerate(weight_placeholder):
param_i, placeholder_i, assign_op_i = triple
param_shape = sess.run(tf.shape(param_i))
if param_shape[0] < min_num_row:
min_num_row = param_shape[0]
# iterates through all layers, idx is the layer number
for idx, triple in enumerate(weight_placeholder):
param_i, placeholder_i, assign_op_i = triple
# OWL weights should be applied to the rows of the weight matrix
param_shape = sess.run(tf.shape(param_i))
reg_params = config['growl_params']
lambda_1 = np.float32(reg_params[idx][0])
lambda_2 = np.float32(reg_params[idx][1])
        if (lambda_1 < 0) or (lambda_2 < 0):
raise Exception('regularization parameters must be non-negative')
# get row_num
row_num = int(param_shape[0])
if config['reg_params_type'] == 'PLD':
if config['PLD_transition'] != 0:
                transition_ind = int(np.floor(param_shape[0] * config['PLD_transition'])) - 1
else:
transition_ind = min_num_row
param_index = np.linspace(start=row_num-1, stop=0, num=transition_ind)
param_index = np.append(param_index, np.zeros([1, int(param_shape[0]-transition_ind)]))
layer_owl_params.append(lambda_1 + lambda_2 * param_index)
assert len(layer_owl_params) == len(weight_placeholder)
return layer_owl_params
def apply_group_lasso(W, weights):
W_norm = norm(W, axis=1)
# apply prox op of Lasso to norms of rows
new_W_norm = np.maximum(W_norm - weights[0], 0)
# compute group owl
new_W = np.zeros_like(W)
for i in range(W.shape[0]):
# print('w_norm: {}'.format(W_norm[i]))
# print('new_w_norm: {}'.format(new_W_norm[i]))
        if W_norm[i] < np.finfo(np.float32).eps:
            # row norm is (numerically) zero: leave the row at zero
            new_W[i, :] = 0.0
        else:
            # rescale the row so its norm equals the shrunken norm
            new_W[i, :] = W[i, :] * (new_W_norm[i] / W_norm[i])
    return new_W
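# Quick check (numbers invented): with threshold weights[0] = 1.0, a row [3, 4]
# has norm 5 and is rescaled by (5 - 1)/5 = 0.8 to [2.4, 3.2], while a row
# [0.1, 0] has norm 0.1 < 1 and is zeroed out entirely.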
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Wrapper around AWS's Random Cut Forest anomaly detection model.
"""
import bisect
import copy
import logging
from os.path import abspath, join, dirname
import numpy as np
from merlion.models.anomaly.base import DetectorConfig, DetectorBase
from merlion.transform.moving_average import DifferenceTransform
from merlion.transform.sequence import TransformSequence
from merlion.transform.resample import Shingle
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import UnivariateTimeSeries, TimeSeries
logger = logging.getLogger(__name__)
class JVMSingleton:
_initialized = False
def __init__(self):
import jpype
import jpype.imports
resource_dir = join(dirname(dirname(dirname(abspath(__file__)))), "resources")
jars = ["gson-2.8.6.jar", "randomcutforest-core-1.0.jar", "randomcutforest-serialization-json-1.0.jar"]
if not JVMSingleton._initialized:
jpype.startJVM(classpath=[join(resource_dir, jar) for jar in jars])
JVMSingleton._initialized = True
class RandomCutForestConfig(DetectorConfig):
_default_transform = TransformSequence([DifferenceTransform(), Shingle(size=5, stride=1)])
def __init__(
self,
n_estimators: int = 100,
parallel: bool = False,
seed: int = None,
max_n_samples: int = 512,
thread_pool_size: int = 1,
online_updates: bool = False,
**kwargs
):
"""
Configuration class for random cut forest. Refer to
https://github.com/aws/random-cut-forest-by-aws/tree/main/Java for
further documentation and defaults of the Java class.
:param n_estimators: The number of trees in this forest.
:param parallel: If true, then the forest will create an internal thread
pool. Forest updates and traversals will be submitted to this thread
pool, and individual trees will be updated or traversed in parallel.
For larger shingle sizes, dimensions, and number of trees,
parallelization may improve throughput.
We recommend users benchmark against their target use case.
:param seed: the random seed
        :param max_n_samples: The number of samples retained by the stream
            samplers in this forest.
:param thread_pool_size: The number of threads to use in the internal
thread pool.
:param online_updates: Whether to update the model while running
using it to evaluate new data.
"""
self.n_estimators = n_estimators
self.parallel = parallel
self.seed = seed
self.max_n_samples = max_n_samples
self.thread_pool_size = thread_pool_size
self.online_updates = online_updates
kwargs["max_score"] = np.floor(np.log2(max_n_samples)) + 1
super().__init__(**kwargs)
@property
def _default_threshold(self):
if not self.enable_calibrator:
return AggregateAlarms(alm_threshold=self.calibrator.max_score / 5)
return AggregateAlarms(alm_threshold=3.0)
@property
def java_params(self):
items = [
("numberOfTrees", self.n_estimators),
("randomSeed", self.seed),
("sampleSize", self.max_n_samples),
("threadPoolSize", self.thread_pool_size if self.parallel else None),
("parallelExecutionEnabled", self.parallel and self.thread_pool_size is not None),
]
return {k: v for k, v in items if v is not None}
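# Hypothetical usage sketch (classes from this module; parameter values invented):
# config = RandomCutForestConfig(n_estimators=50, max_n_samples=256, online_updates=True)
# model = RandomCutForest(config)  # model.forest is built later, during training (not shown here)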
class RandomCutForest(DetectorBase):
"""
The random cut forest is a refinement of the classic isolation forest
algorithm. It was proposed in
`Guha et al. 2016 <http://proceedings.mlr.press/v48/guha16.pdf>`_.
"""
config_class = RandomCutForestConfig
def __init__(self, config: RandomCutForestConfig):
super().__init__(config)
self.forest = None
@property
def online_updates(self) -> bool:
return self.config.online_updates
def __getstate__(self):
JVMSingleton()
from com.amazon.randomcutforest.serialize import RandomCutForestSerDe
# Copy state, remove forest, and then deepcopy
# (since we can't deepcopy the forest)
state = copy.copy(self.__dict__)
forest = state.pop("forest", None)
state = copy.deepcopy(state)
# Set the forest in the copied state to the serialized version
# The transform is specified the config, so don't save it
state["forest"] = str(RandomCutForestSerDe().toJson(forest))
return state
def __setstate__(self, state):
JVMSingleton()
from com.amazon.randomcutforest.serialize import RandomCutForestSerDe
# Remove the serialized forest from the state before setting it
# Set the forest manually after deserializing it
forest = RandomCutForestSerDe().fromJson(state.pop("forest", None))
super().__setstate__(state)
self.forest = forest
def _convert_point(self, point):
import jpype
return jpype.types.JArray.of(point)
def _forest_train(self, train_data: np.ndarray):
n, d = train_data.shape
scores = []
for i in range(n):
jpoint = self._convert_point(train_data[i, :])
scores.append(self.forest.getAnomalyScore(jpoint))
self.forest.update(jpoint)
return np.array(scores)
def _forest_predict(self, data: np.ndarray, online_updates: bool):
scores = []
n, d = data.shape
for i in range(n):
jpoint = self._convert_point(data[i, :])
scores.append(self.forest.getAnomalyScore(jpoint))
if online_updates:
self.forest.update(jpoint)
        return np.array(scores)
import pandas as pd
import sys
import numpy as np
def speedTest(processed):
dataframe = pd.DataFrame()
array = np.ndarray((36652,1))
for system in processed:
for unit in processed[system]:
for flow in processed[system][unit]:
                for prop in processed[system][unit][flow]:
                    if isinstance(processed[system][unit][flow][prop], pd.Series):
                        ID = system + "_" + unit + "_" + flow + "_" + prop
                        print(ID)
                        dataframe[ID] = processed[system][unit][flow][prop]
                        # np.append returns a new array, so the result must be reassigned
                        array = np.append(array, np.array(processed[system][unit][flow][prop]))
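# Hypothetical faster variant: collect the columns in a dict first and build the
# DataFrame once, since np.append and per-column insertion copy data on every pass.
# cols = {ID: series for ...}; dataframe = pd.DataFrame(cols)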
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# This __future__ import enables true division: fractional parts are kept instead of truncated.
from __future__ import division
import os
import torch as t
import numpy as np
import cv2
import six
import itertools
from utils.config import opt
from model import FasterRCNNVGG16
from trainer import FasterRCNNTrainer
from data.util import read_image
from utils.vis_tool import vis_bbox
from utils import array_tool as at
from collections import defaultdict
from model.utils.bbox_tools import bbox_iou
from data.dataset import Dataset, TestDataset, inverse_normalize
from torch.utils import data as data_
from tqdm import tqdm
from utils.eval_tool import eval_detection_voc
def example_mining_map(trainer, modify_txt_path):
# 加载权重
trainset = TestDataset(opt, split=str(int(modify_txt_path) - 1))
train_dataloader = data_.DataLoader(trainset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
pred_bboxes1, pred_labels1, pred_scores1, gt_bboxes, gt_labels, gt_difficults, ID = bbox_result(
train_dataloader, trainer.faster_rcnn, test_num=1100)
map_result = every_map(pred_bboxes1, pred_labels1,
pred_scores1, gt_bboxes, gt_labels, gt_difficults)
# print map_result
modify(modify_txt_path, map_result, ID)
def example_mining_map_diversity(trainer, modify_txt_path):
    # load weights
trainset = TestDataset(opt, split=str(int(modify_txt_path) - 1))
train_dataloader = data_.DataLoader(trainset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
pred_bboxes1, pred_labels1, pred_scores1, gt_bboxes, gt_labels, gt_difficults, ID = bbox_result(
train_dataloader, trainer.faster_rcnn, test_num=1100)
map_result = every_map(pred_bboxes1, pred_labels1,
pred_scores1, gt_bboxes, gt_labels, gt_difficults)
if opt.example_sort == 'max':
total_different = np.zeros(2000)
else:
total_different = np.zeros(2000) + 1000
ID2 = list()
trainset = TestDataset(opt, split=str(int(modify_txt_path) - 1))
train_dataloader = data_.DataLoader(trainset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_, id_) in enumerate(train_dataloader):
if len(gt_bboxes_) == 0:
continue
# print('1', imgs.shape)
ID2 += list(id_)
sizes = [sizes[0][0].item(), sizes[1][0].item()]
total_different[ii] = different(
imgs, gt_bboxes_, sizes, trainer.faster_rcnn, thread=0.3)
if opt.example_sort == 'max':
order = map_result.argsort()
order2 = total_different.argsort()[::-1]
else:
order = map_result.argsort()[::-1]
order2 = total_different.argsort()
sum = 0
f = open(opt.voc_data_dir + '/ImageSets/Main/' +
modify_txt_path + '.txt', "a")
for i in range(500):
for j in range(opt.mining_number * 2):
if ID2[order2[i]] == ID[order[j]]:
f.write(ID2[order2[i]] + '\n')
sum += 1
if sum >= opt.mining_number:
break
def example_mining_diversity(trainer, modify_txt_path):
if opt.example_sort == 'max':
total_different = np.zeros(2000)
else:
total_different = np.zeros(2000) + 1000
ID = list()
trainset = TestDataset(opt, split=str(int(modify_txt_path) - 1))
train_dataloader = data_.DataLoader(trainset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_, id_) in enumerate(train_dataloader):
if len(gt_bboxes_) == 0:
continue
# print('1', imgs.shape)
ID += list(id_)
sizes = [sizes[0][0].item(), sizes[1][0].item()]
total_different[ii] = different(
imgs, gt_bboxes_, sizes, trainer.faster_rcnn, thread=0.7)
modify(modify_txt_path, total_different, ID)
def example_mining_map_loss(trainer, modify_txt_path):
    # load weights
trainset = TestDataset(opt, split=str(int(modify_txt_path) - 1))
train_dataloader = data_.DataLoader(trainset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
pred_bboxes1, pred_labels1, pred_scores1, gt_bboxes, gt_labels, gt_difficults, ID = bbox_result(
train_dataloader, trainer.faster_rcnn, test_num=1100)
map_result = every_map(pred_bboxes1, pred_labels1,
pred_scores1, gt_bboxes, gt_labels, gt_difficults)
f = open('loss.txt', "r")
a = dict.fromkeys(ID)
for i in range(len(ID)):
line = f.readline()
a[line[0:6]] = float(line[7:-2])
f.close()
for i in range(len(ID)):
map_result[i] = a[ID[i]] - map_result[i]
modify(modify_txt_path, map_result, ID)
def example_mining_loss(datatxt):
    # load weights
f = open('loss.txt', "r")
f2 = open(opt.voc_data_dir + '/ImageSets/Main/' +
datatxt + '.txt', "a")
for i in range(opt.mining_number):
f2.write(f.readline()[0:6] + '\n')
f.close()
f2.close()
def bbox_iou(bbox_a, bbox_b):
    # inputs are the ground-truth boxes and the predicted boxes
if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
raise IndexError
    # the two box sets may differ in length, so broadcasting builds a
    # [len(bbox_a), len(bbox_b), 2] array of pairwise comparisons
    # top-left corner of each pairwise intersection
tl = np.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
    # bottom-right corner of each pairwise intersection
    br = np.minimum(bbox_a[:, None, 2:], bbox_b[:, 2:])
    # intersection area, zeroed for non-overlapping pairs
    area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)
    area_a = np.prod(bbox_a[:, 2:] - bbox_a[:, :2], axis=1)
    area_b = np.prod(bbox_b[:, 2:] - bbox_b[:, :2], axis=1)
    return area_i / (area_a[:, None] + area_b - area_i)
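# Sketch with invented boxes in (y_min, x_min, y_max, x_max) order:
# a = np.array([[0., 0., 10., 10.]]); b = np.array([[5., 5., 15., 15.]])
# bbox_iou(a, b) -> array([[0.1428...]])  # 25 overlap / 175 union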
from collections import OrderedDict
from copy import copy, deepcopy
from textwrap import dedent
import cPickle as pickle
import unittest
import numpy as np
import pandas as pd
from xray import Dataset, DataArray, Variable, backends, utils, align, indexing
from . import TestCase
_dims = {'dim1': 100, 'dim2': 50, 'dim3': 10}
_vars = {'var1': ['dim1', 'dim2'],
'var2': ['dim1', 'dim2'],
'var3': ['dim3', 'dim1'],
}
_testvar = sorted(_vars.keys())[0]
_testdim = sorted(_dims.keys())[0]
def create_test_data(seed=None):
rs = np.random.RandomState(seed)
obj = Dataset()
obj['time'] = ('time', pd.date_range('2000-01-01', periods=20))
obj['dim1'] = ('dim1', np.arange(_dims['dim1']))
obj['dim2'] = ('dim2', 0.5 * np.arange(_dims['dim2']))
obj['dim3'] = ('dim3', list('abcdefghij'))
for v, dims in sorted(_vars.items()):
data = rs.normal(size=tuple(_dims[d] for d in dims))
obj[v] = (dims, data, {'foo': 'variable'})
return obj
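# For reference: create_test_data() yields e.g. var1 with dims ('dim1', 'dim2')
# and shape (100, 50), plus the time/dim1/dim2/dim3 coordinates defined above.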
class UnexpectedDataAccess(Exception):
pass
class InaccessibleArray(utils.NDArrayMixin):
def __init__(self, array):
self.array = array
def __getitem__(self, key):
raise UnexpectedDataAccess("Tried accessing data")
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
def __init__(self):
self.dimensions = OrderedDict()
self._variables = OrderedDict()
self.attrs = OrderedDict()
def set_variable(self, name, variable):
self._variables[name] = variable
return self._variables[name]
def open_store_variable(self, var):
data = indexing.LazilyIndexedArray(InaccessibleArray(var.values))
return Variable(var.dimensions, data, var.attrs)
@property
def store_variables(self):
return self._variables
class TestDataset(TestCase):
def test_repr(self):
data = create_test_data()
expected = dedent("""
<xray.Dataset>
Dimensions: (dim1: 100, dim2: 50, dim3: 10, time: 20)
Coordinates:
dim1 X
dim2 X
dim3 X
time X
Noncoordinates:
var1 0 1
var2 0 1
var3 1 0
Attributes:
Empty
""").strip()
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
self.assertEqual(expected, actual)
expected = dedent("""
<xray.Dataset>
Dimensions: ()
Coordinates:
None
Noncoordinates:
None
Attributes:
Empty
""").strip()
actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
self.assertEqual(expected, actual)
def test_init(self):
var1 = Variable('x', 2 * np.arange(100))
var2 = Variable('x', np.arange(1000))
var3 = Variable(['x', 'y'], np.arange(1000).reshape(100, 10))
with self.assertRaisesRegexp(ValueError, 'but already exists'):
Dataset({'a': var1, 'b': var2})
with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
Dataset({'a': var1, 'x': var3})
# verify handling of DataArrays
expected = Dataset({'x': var1, 'z': var3})
actual = Dataset({'z': expected['z']})
self.assertDatasetIdentical(expected, actual)
def test_variable(self):
a = Dataset()
d = np.random.random((10, 3))
a['foo'] = (('time', 'x',), d)
self.assertTrue('foo' in a.variables)
self.assertTrue('foo' in a)
a['bar'] = (('time', 'x',), d)
# order of creation is preserved
self.assertTrue(a.variables.keys() == ['foo', 'time', 'x', 'bar'])
self.assertTrue(all([a.variables['foo'][i].values == d[i]
for i in np.ndindex(*d.shape)]))
# try to add variable with dim (10,3) with data that's (3,10)
with self.assertRaises(ValueError):
a['qux'] = (('time', 'x'), d.T)
def test_coordinate(self):
a = Dataset()
vec = np.random.random((10,))
attributes = {'foo': 'bar'}
a['x'] = ('x', vec, attributes)
self.assertTrue('x' in a.coordinates)
self.assertIsInstance(a.coordinates['x'].as_index, pd.Index)
self.assertVariableEqual(a.coordinates['x'], a.variables['x'])
b = Dataset()
b['x'] = ('x', vec, attributes)
self.assertVariableEqual(a['x'], b['x'])
self.assertEquals(a.dimensions, b.dimensions)
# this should work
a['x'] = ('x', vec[:5])
a['z'] = ('x', np.arange(5))
with self.assertRaises(ValueError):
# now it shouldn't, since there is a conflicting length
a['x'] = ('x', vec[:4])
arr = np.random.random((10, 1,))
scal = np.array(0)
with self.assertRaises(ValueError):
a['y'] = ('y', arr)
with self.assertRaises(ValueError):
a['y'] = ('y', scal)
self.assertTrue('y' not in a.dimensions)
def test_equals_and_identical(self):
data = create_test_data(seed=42)
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = create_test_data(seed=42)
data2.attrs['foobar'] = 'baz'
self.assertTrue(data.equals(data2))
self.assertTrue(data == data2)
self.assertFalse(data.identical(data2))
del data2['time']
self.assertFalse(data.equals(data2))
self.assertTrue(data != data2)
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
self.assertTrue(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
def test_indexed(self):
data = create_test_data()
slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
ret = data.indexed(**slicers)
# Verify that only the specified dimension was altered
self.assertItemsEqual(data.dimensions, ret.dimensions)
for d in data.dimensions:
if d in slicers:
self.assertEqual(ret.dimensions[d],
np.arange(data.dimensions[d])[slicers[d]].size)
else:
self.assertEqual(data.dimensions[d], ret.dimensions[d])
# Verify that the data is what we expect
for v in data.variables:
self.assertEqual(data[v].dimensions, ret[v].dimensions)
self.assertEqual(data[v].attrs, ret[v].attrs)
slice_list = [slice(None)] * data[v].values.ndim
for d, s in slicers.iteritems():
if d in data[v].dimensions:
inds = np.nonzero(np.array(data[v].dimensions) == d)[0]
for ind in inds:
slice_list[ind] = s
expected = data[v].values[slice_list]
actual = ret[v].values
np.testing.assert_array_equal(expected, actual)
with self.assertRaises(ValueError):
data.indexed(not_a_dim=slice(0, 2))
ret = data.indexed(dim1=0)
self.assertEqual({'time': 20, 'dim2': 50, 'dim3': 10}, ret.dimensions)
self.assertItemsEqual(list(data.noncoordinates) + ['dim1'],
ret.noncoordinates)
ret = data.indexed(time=slice(2), dim1=0, dim2=slice(5))
self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dimensions)
self.assertItemsEqual(list(data.noncoordinates) + ['dim1'],
ret.noncoordinates)
ret = data.indexed(time=0, dim1=0, dim2=slice(5))
self.assertItemsEqual({'dim2': 5, 'dim3': 10}, ret.dimensions)
self.assertItemsEqual(list(data.noncoordinates) + ['dim1', 'time'],
ret.noncoordinates)
def test_labeled(self):
data = create_test_data()
int_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(2),
'dim3': slice(3)}
loc_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(0, 0.5),
'dim3': slice('a', 'c')}
self.assertEqual(data.indexed(**int_slicers),
data.labeled(**loc_slicers))
data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
self.assertEqual(data.indexed(time=0),
data.labeled(time='2000-01-01'))
self.assertEqual(data.indexed(time=slice(10)),
data.labeled(time=slice('2000-01-01',
'2000-01-10')))
self.assertEqual(data, data.labeled(time=slice('1999', '2005')))
self.assertEqual(data.indexed(time=slice(3)),
data.labeled(
time=pd.date_range('2000-01-01', periods=3)))
def test_reindex_like(self):
data = create_test_data()
expected = data.indexed(dim1=slice(10), time=slice(13))
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
expected = data.copy(deep=True)
expected['dim3'] = ('dim3', list('cdefghijkl'))
expected['var3'][:-2] = expected['var3'][2:]
expected['var3'][-2:] = np.nan
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
def test_reindex(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.reindex())
expected = data.indexed(dim1=slice(10))
actual = data.reindex(dim1=data['dim1'][:10])
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].values)
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].as_index)
self.assertDatasetIdentical(actual, expected)
def test_align(self):
left = create_test_data()
right = left.copy(deep=True)
right['dim3'] = ('dim3', list('cdefghijkl'))
right['var3'][:-2] = right['var3'][2:]
right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
intersection = list('cdefghij')
union = list('abcdefghijkl')
left2, right2 = align(left, right, join='inner')
self.assertArrayEqual(left2['dim3'], intersection)
self.assertDatasetIdentical(left2, right2)
left2, right2 = align(left, right, join='outer')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertArrayEqual(left2['dim3'], union)
self.assertDatasetIdentical(left2.labeled(dim3=intersection),
right2.labeled(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='left')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], left['dim3'])
self.assertDatasetIdentical(left2.labeled(dim3=intersection),
right2.labeled(dim3=intersection))
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='right')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], right['dim3'])
self.assertDatasetIdentical(left2.labeled(dim3=intersection),
right2.labeled(dim3=intersection))
        self.assertTrue(np.isnan(left2['var3'][-2:]).all())
import os
import torch
import shutil
import numpy as np
from lib.loss.loss import mpjpe, n_mpjpe, p_mpjpe, mean_velocity_error, weighted_mpjpe
from lib.dataloader.generators import UnchunkedGenerator
from lib.camera.camera import image_coordinates
from lib.skeleton.bone import get_bone_length_from_3d_pose, get_bone_unit_vector_from_3d_pose
class Trainer():
def __init__(self,
data_config, model_config, train_config, plot_config,
train_generator, test_generator,
models, optimizer,
kps_left, kps_right, joints_left, joints_right, plotter, best_performance=None):
self.data_config = data_config
self.model_config = model_config
self.train_config = train_config
self.plot_config = plot_config
self.lr = train_config['LEARNING_RATE']
self.optimizer = optimizer
self.train_generator = train_generator
self.test_generator = test_generator
self.pos_model_train = models['train_pos']
self.pos_model_test = models['test_pos']
self.trj_model_train = models['train_trj']
self.trj_model_test = models['test_trj']
self.min_loss = 1e5 if best_performance is None else best_performance
self.losses_3d_train = []
self.losses_3d_valid = []
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
self.receptive_field = model_config['NUM_FRAMES']
self.plotter = plotter
@staticmethod
def eval_data_prepare(receptive_field, inputs_2d, inputs_3d):
inputs_2d_p = torch.squeeze(inputs_2d)
if inputs_3d is not None:
inputs_3d_p = inputs_3d.permute(1, 0, 2, 3)
else:
inputs_3d_p = inputs_3d
out_num = inputs_2d_p.shape[0] - receptive_field + 1
eval_input_2d = torch.empty(out_num, receptive_field, inputs_2d_p.shape[1], inputs_2d_p.shape[2])
for i in range(out_num):
eval_input_2d[i, :, :, :] = inputs_2d_p[i:i + receptive_field, :, :]
return eval_input_2d, inputs_3d_p
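    # Shape sketch (numbers invented): with receptive_field=27 and a 100-frame clip,
    # eval_data_prepare returns 100 - 27 + 1 = 74 overlapping windows of 27 frames each.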
def train(self, epoch, mlog):
N = 0
epoch_loss_3d_train = 0
epoch_loss_3d_pos = 0
epoch_loss_3d_trj = 0
epoch_loss_3d_bone = 0
self.pos_model_train.train()
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_train.train()
iter = 0
for cam, batch_3d, batch_2d in self.train_generator.next_epoch():
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
inputs_3d = torch.from_numpy(batch_3d.astype('float32'))
inputs_param = torch.from_numpy(cam.astype('float32'))
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
inputs_3d = inputs_3d.cuda()
inputs_param = inputs_param.cuda()
if self.model_config['TRAJECTORY_MODEL']:
inputs_traj = inputs_3d[:, :, :1].clone()
if self.data_config['RAY_ENCODING']:
                # ray encoding: recenter joints only when a trajectory model predicts the root
if self.model_config['TRAJECTORY_MODEL']:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
else:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
self.optimizer.zero_grad()
# Predict 3D poses
predicted_3d_pos = self.pos_model_train(inputs_2d, inputs_param)
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_train += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
epoch_loss_3d_pos += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
total_loss = loss_3d_pos
if self.model_config['BONE_COMPARISON']:
predicted_bone_length = get_bone_length_from_3d_pose(predicted_3d_pos)
target_bone_length = get_bone_length_from_3d_pose(inputs_3d)
loss_3d_bone_length = mpjpe(predicted_bone_length, target_bone_length)
predicted_bone_unit_vector = get_bone_unit_vector_from_3d_pose(predicted_3d_pos)
target_bone_unit_vector = get_bone_unit_vector_from_3d_pose(inputs_3d)
loss_3d_bone_angle = mpjpe(predicted_bone_unit_vector, target_bone_unit_vector)
epoch_loss_3d_bone += inputs_3d.shape[0] * inputs_3d.shape[1] * (loss_3d_bone_length.item() + loss_3d_bone_angle.item())
total_loss += (loss_3d_bone_length + loss_3d_bone_angle)
if self.model_config['TRAJECTORY_MODEL']:
predicted_3d_trj = self.trj_model_train(inputs_2d, inputs_param)
w = torch.abs(1 / inputs_traj[:, :, :, 2]) # Weight inversely proportional to depth
loss_3d_traj = weighted_mpjpe(predicted_3d_trj, inputs_traj, w)
assert inputs_traj.shape[0] * inputs_traj.shape[1] == inputs_3d.shape[0] * inputs_3d.shape[1]
epoch_loss_3d_train += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_traj.item()
epoch_loss_3d_trj += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_traj.item()
total_loss += loss_3d_traj
# ---------------- visualization ---------------- #
if iter % 2048 == 0 and self.plotter is not None and epoch % 64 == 0:
self.plotter.show_plot(
epoch, inputs_2d.detach().cpu().numpy(),
inputs_3d.detach().cpu().numpy(),
predicted_3d_pos.detach().cpu().numpy(),
dataset=self.data_config['DATASET'],
gt=self.data_config['KEYPOINTS']
)
# ---------------- visualization ---------------- #
iter += 1
total_loss.backward()
self.optimizer.step()
self.losses_3d_train.append(epoch_loss_3d_train / N)
torch.cuda.empty_cache()
if self.plotter:
# plot all the losses
self.plotter.log_metric('train', self.losses_3d_train[-1] * 1000, epoch)
self.plotter.log_metric('train_pos', epoch_loss_3d_pos / N * 1000, epoch)
self.plotter.log_metric('train_trj', epoch_loss_3d_trj / N * 1000, epoch)
self.plotter.log_metric('train_bone', epoch_loss_3d_bone / N * 1000, epoch)
# plot all the learning rates
self.plotter.log_metric('lr', self.lr, epoch)
# return the current epoch's mpjme
return self.losses_3d_train[-1], self.lr
def test(self, epoch, mlog):
with torch.no_grad():
self.pos_model_test.load_state_dict(self.pos_model_train.state_dict(), strict=True)
self.pos_model_test.eval()
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_test.load_state_dict(self.trj_model_train.state_dict(), strict=True)
self.trj_model_test.eval()
epoch_loss_3d_valid = 0
epoch_loss_3d_pos = 0
epoch_loss_3d_trj = 0
epoch_loss_3d_bone = 0
N = 0
# Evaluate on test set
for cam, batch, batch_2d in self.test_generator.next_epoch():
cam_param = np.array([[(-c.Rw2c.T @ c.Tw2c)[2][0], c.cam_pitch_rad] for c in cam]).astype('float32')
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
inputs_3d = torch.from_numpy(batch.astype('float32'))
##### convert size
inputs_2d, inputs_3d = self.eval_data_prepare(self.receptive_field, inputs_2d, inputs_3d)
inputs_param = torch.from_numpy(cam_param)
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
inputs_3d = inputs_3d.cuda()
inputs_param = inputs_param.cuda()
if self.model_config['TRAJECTORY_MODEL']:
inputs_traj = inputs_3d.clone()
if self.data_config['RAY_ENCODING']:
                    # ray encoding: recenter joints only when a trajectory model predicts the root
if self.model_config['TRAJECTORY_MODEL']:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
else:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
predicted_3d_pos = self.pos_model_test(inputs_2d, inputs_param)
epoch_loss_3d_pos += inputs_3d.shape[0] * inputs_3d.shape[1] * mpjpe(predicted_3d_pos, inputs_3d).item()
if self.model_config['BONE_COMPARISON']:
predicted_bone_length = get_bone_length_from_3d_pose(predicted_3d_pos)
target_bone_length = get_bone_length_from_3d_pose(inputs_3d)
loss_3d_bone_length = mpjpe(predicted_bone_length, target_bone_length)
predicted_bone_unit_vector = get_bone_unit_vector_from_3d_pose(predicted_3d_pos)
target_bone_unit_vector = get_bone_unit_vector_from_3d_pose(inputs_3d)
loss_3d_bone_angle = mpjpe(predicted_bone_unit_vector, target_bone_unit_vector)
epoch_loss_3d_bone += inputs_3d.shape[0] * inputs_3d.shape[1] * (loss_3d_bone_length.item() + loss_3d_bone_angle.item())
if self.model_config['TRAJECTORY_MODEL']:
predicted_3d_trj = self.trj_model_test(inputs_2d, inputs_param)
predicted_3d_pos += predicted_3d_trj
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_traj)
w = torch.abs(1 / inputs_traj[:, :, 0, 2])
epoch_loss_3d_trj += inputs_3d.shape[0] * inputs_3d.shape[1] * weighted_mpjpe(predicted_3d_trj, inputs_traj[:, :, 0:1], w).item()
else:
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
self.losses_3d_valid.append(epoch_loss_3d_valid / N)
# Save checkpoint if necessary
if epoch % self.train_config['CHECKPOINT_FREQUENCY'] == 0:
chk_path = os.path.join(self.train_config['CHECKPOINT'], 'epoch_{}.bin'.format(epoch))
mlog.info('Saving epochs {}\'s checkpoint to {}.'.format(epoch, chk_path))
if self.model_config['TRAJECTORY_MODEL']:
torch.save({
'epoch': epoch,
'lr': self.lr,
'best_performance': self.losses_3d_valid[-1] * 1000 if self.losses_3d_valid[-1] * 1000 < self.min_loss else self.min_loss,
'random_state': self.train_generator.random_state(),
'optimizer': self.optimizer.state_dict(),
'model_pos': self.pos_model_train.state_dict(),
'model_trj': self.trj_model_train.state_dict()
}, chk_path)
else:
torch.save({
'epoch': epoch,
'lr': self.lr,
'best_performance': self.losses_3d_valid[-1] * 1000 if self.losses_3d_valid[-1] * 1000 < self.min_loss else self.min_loss,
'random_state': self.train_generator.random_state(),
'optimizer': self.optimizer.state_dict(),
'model_pos': self.pos_model_train.state_dict(),
}, chk_path)
#### save best checkpoint
best_chk_path = os.path.join(self.train_config['CHECKPOINT'], 'best_epoch.bin'.format(epoch))
if self.losses_3d_valid[-1] * 1000 < self.min_loss:
self.min_loss = self.losses_3d_valid[-1] * 1000
mlog.info('Saving best checkpoint to {} with mpjpe: {}.'.format(best_chk_path, self.min_loss))
shutil.copy(chk_path, best_chk_path)
cmd = 'rm {}'.format(chk_path)
os.system(cmd)
# Decay learning rate exponentially
self.lr *= self.train_config['LR_DECAY']
for param_group in self.optimizer.param_groups:
param_group['lr'] *= self.train_config['LR_DECAY']
# Decay BatchNorm momentum
if self.model_config['MODEL'] == 'VideoPose3D':
momentum = self.train_config['INITIAL_MOMENTUM'] * np.exp(-(epoch-1) / self.train_config['EPOCHS'] * np.log(self.train_config['INITIAL_MOMENTUM'] / self.train_config['FINAL_MOMENTUM']))
self.pos_model_train.module.set_bn_momentum(momentum)
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_train.module.set_bn_momentum(momentum)
if self.plotter:
# plot all the losses
self.plotter.log_metric('test', self.losses_3d_valid[-1] * 1000, epoch)
self.plotter.log_metric('test_pos', epoch_loss_3d_pos / N * 1000, epoch)
self.plotter.log_metric('test_trj', epoch_loss_3d_trj / N * 1000, epoch)
self.plotter.log_metric('test_bone', epoch_loss_3d_bone / N * 1000, epoch)
# return the current epoch's mpjme
return self.losses_3d_valid[-1]
def evaluate_core(self, test_generator, action=None, return_predictions=False, flip_test=False):
epoch_loss_3d_pos = 0
epoch_loss_3d_pos_procrustes = 0
epoch_loss_3d_pos_scale = 0
epoch_loss_3d_vel = 0
epoch_loss_3d_root = 0
with torch.no_grad():
self.pos_model_test.eval()
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_test.eval()
N = 0
for cam, batch, batch_2d in test_generator.next_epoch():
cam_param = np.array([(-cam.Rw2c.T @ cam.Tw2c)[2][0], cam.cam_pitch_rad]).astype('float32')
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
if flip_test:
inputs_2d_flip = inputs_2d.clone()
inputs_2d_flip[:, :, :, 0] *= -1
inputs_2d_flip[:, :, self.kps_left + self.kps_right, :] = inputs_2d_flip[:, :, self.kps_right + self.kps_left, :]
if return_predictions:
inputs_2d, _ = self.eval_data_prepare(self.receptive_field, inputs_2d, None)
inputs_param = torch.from_numpy(np.tile(cam_param, (inputs_2d.shape[0], 1)))
if self.model_config['TRAJECTORY_MODEL']:
return (
self.pos_model_test(inputs_2d, inputs_param) + self.trj_model_test(inputs_2d, inputs_param)
).squeeze(0).cpu().numpy()
else:
return self.pos_model_test(inputs_2d, inputs_param).squeeze(0).cpu().numpy()
if self.model_config['TRAJECTORY_MODEL'] or self.data_config['RAY_ENCODING']:
# do nothing
pass
else:
batch[:, :, 1:] -= batch[:, :, 0:1]
batch[:, :, 0] = 0
inputs_3d = torch.from_numpy(batch.astype('float32'))
inputs_2d, inputs_3d = self.eval_data_prepare(self.receptive_field, inputs_2d, inputs_3d)
inputs_param = torch.from_numpy(np.tile(cam_param, (inputs_2d.shape[0], 1)))
if flip_test:
inputs_2d_flip, _ = self.eval_data_prepare(self.receptive_field, inputs_2d_flip, inputs_3d)
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
inputs_3d = inputs_3d.cuda()
inputs_param = inputs_param.cuda()
if flip_test:
inputs_2d_flip = inputs_2d_flip.cuda()
if self.model_config['TRAJECTORY_MODEL']:
predicted_3d_pos = self.pos_model_test(inputs_2d, inputs_param)
if flip_test:
predicted_3d_pos_flip = self.pos_model_test(inputs_2d_flip, inputs_param)
predicted_3d_pos_flip[:, :, :, 0] *= -1
predicted_3d_pos_flip[:, :, self.kps_left + self.kps_right] = predicted_3d_pos_flip[:, :,
self.kps_right + self.kps_left]
predicted_3d_pos = torch.mean(torch.cat((predicted_3d_pos, predicted_3d_pos_flip), dim=1),
dim=1,
keepdim=True)
predicted_3d_trj = self.trj_model_test(inputs_2d, inputs_param)
if flip_test:
predicted_3d_trj_flip = self.trj_model_test(inputs_2d_flip, inputs_param)
predicted_3d_trj_flip[:, :, :, 0] *= -1
predicted_3d_trj = torch.mean(torch.cat((predicted_3d_trj, predicted_3d_trj_flip), dim=1),
dim=1,
keepdim=True)
predicted_3d_pos += predicted_3d_trj
                    if cam is not None:
pred = predicted_3d_pos.cpu().numpy()
target = inputs_3d.cpu().numpy()
if self.data_config['RAY_ENCODING']:
pred_world = cam.normalized2world(pred)
target_world = cam.normalized2world(target)
else:
pred_world = cam.camera2world(pred)
target_world = cam.camera2world(target)
predicted_3d_pos = torch.from_numpy(pred_world)
inputs_3d = torch.from_numpy(target_world)
else:
predicted_3d_pos = self.pos_model_test(inputs_2d, inputs_param)
if flip_test:
predicted_3d_pos_flip = self.pos_model_test(inputs_2d_flip, inputs_param)
predicted_3d_pos_flip[:, :, :, 0] *= -1
predicted_3d_pos_flip[:, :, self.kps_left + self.kps_right] = predicted_3d_pos_flip[:, :,
self.kps_right + self.kps_left]
predicted_3d_pos = torch.mean(torch.cat((predicted_3d_pos, predicted_3d_pos_flip), dim=1), dim=1,
keepdim=True)
if self.data_config['RAY_ENCODING']:
                        # ray encoding: convert normalized coordinates back to world space
pred = predicted_3d_pos.cpu().numpy()
target = inputs_3d.cpu().numpy()
pred_world = cam.normalized2world(pred)
target_world = cam.normalized2world(target)
predicted_3d_pos = torch.from_numpy(pred_world)
inputs_3d = torch.from_numpy(target_world)
else:
# do nothing
pass
epoch_loss_3d_pos += inputs_3d.shape[0] * inputs_3d.shape[1] * mpjpe(predicted_3d_pos, inputs_3d).item()
epoch_loss_3d_root += inputs_3d.shape[0] * inputs_3d.shape[1] * mpjpe(predicted_3d_pos[:, :, 0:1, :], inputs_3d[:, :, 0:1, :]).item()
epoch_loss_3d_pos_scale += inputs_3d.shape[0] * inputs_3d.shape[1] * n_mpjpe(predicted_3d_pos, inputs_3d).item()
inputs = inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
predicted_3d_pos = predicted_3d_pos.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
epoch_loss_3d_pos_procrustes += inputs_3d.shape[0] * inputs_3d.shape[1] * p_mpjpe(predicted_3d_pos, inputs)
epoch_loss_3d_vel += inputs_3d.shape[0] * inputs_3d.shape[1] * mean_velocity_error(predicted_3d_pos, inputs)
N += inputs_3d.shape[0] * inputs_3d.shape[1]
e1 = (epoch_loss_3d_pos / N) * 1000
e2 = (epoch_loss_3d_pos_procrustes / N) * 1000
e3 = (epoch_loss_3d_pos_scale / N) * 1000
ev = (epoch_loss_3d_vel / N) * 1000
er = (epoch_loss_3d_root / N) * 1000
return e1, e2, e3, ev, er
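    # Metric legend (matching the log messages in evaluate() below): e1 = MPJPE,
    # e2 = P-MPJPE (Procrustes-aligned), e3 = N-MPJPE (scale-normalized),
    # ev = MPJVE (velocity error), er = MRPE (root position error); all in mm.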
def evaluate(self, mlog, subjects_test, pose_data, action_filter, pad, causal_shift, epoch, plot=False):
all_actions = dict()
for subject in subjects_test:
# all_actions.setdefault('Sitting 1', list()).append((subject, 'Sitting 1'))
if action_filter == None:
action_keys = pose_data.get_dataset()[subject].keys()
else:
action_keys = action_filter
for action in action_keys:
all_actions.setdefault(action.split(' ')[0], list()).append((subject, action))
errors_p1 = []
errors_p2 = []
errors_p3 = []
errors_vel = []
errors_root = []
if 'CAMERA_WISE_PERFORMANCE' in self.data_config and self.data_config['CAMERA_WISE_PERFORMANCE']:
camera_dist = pose_data.get_dataset().camera_dist
for cam_idx in range(len(camera_dist)):
cam_id = camera_dist[cam_idx]
for action_key in all_actions.keys():
poses_cam, poses_act, poses_2d_act = pose_data.fetch_via_action(all_actions[action_key], camera_idx=cam_idx)
action_generator = UnchunkedGenerator(poses_cam, poses_act, poses_2d_act,
pad=pad, causal_shift=causal_shift,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left, joints_right=self.joints_right)
e1, e2, e3, ev, er = self.evaluate_core(action_generator, action_key,
flip_test=self.train_config['TEST_TIME_AUGMENTATION'])
errors_p1.append(e1)
errors_p2.append(e2)
errors_p3.append(e3)
errors_vel.append(ev)
errors_root.append(er)
p1, p2, p3, p4 = round(np.mean(errors_p1), 1), round(np.mean(errors_p2), 1), round(np.mean(errors_p3), 1), round(np.mean(errors_vel), 1)
p5 = round(np.mean(errors_root), 1)
mlog.info('CAM ID {}, {} {} {} {} {}'.format(cam_id, p1, p2, p3, p4, p5))
else:
for action_key in all_actions.keys():
poses_cam, poses_act, poses_2d_act = pose_data.fetch_via_action(all_actions[action_key])
action_generator = UnchunkedGenerator(poses_cam, poses_act, poses_2d_act,
pad=pad, causal_shift=causal_shift,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left, joints_right=self.joints_right)
if action_key is None:
mlog.info('----------')
else:
mlog.info('----' + action_key + '----')
e1, e2, e3, ev, er = self.evaluate_core(action_generator, action_key,
flip_test=self.train_config['TEST_TIME_AUGMENTATION'])
mlog.info('Protocol #1 Error (MPJPE): {} mm'.format(e1))
mlog.info('Protocol #2 Error (P-MPJPE): {} mm'.format(e2))
mlog.info('Protocol #3 Error (N-MPJPE): {} mm'.format(e3))
mlog.info('Velocity Error (MPJVE): {} mm'.format(ev))
mlog.info('Root Error (MRPE): {} mm'.format(er))
mlog.info('----------')
errors_p1.append(e1)
errors_p2.append(e2)
errors_p3.append(e3)
errors_vel.append(ev)
errors_root.append(er)
mlog.info('Protocol #1 (MPJPE) action-wise average: {} mm'.format(round(np.mean(errors_p1), 1)))
mlog.info('Protocol #2 (P-MPJPE) action-wise average: {} mm'.format(round(np.mean(errors_p2), 1)))
mlog.info('Protocol #3 (N-MPJPE) action-wise average: {} mm'.format(round(np.mean(errors_p3), 1)))
mlog.info('Velocity (MPJVE) action-wise average: {} mm'.format(round(np.mean(errors_vel), 1)))
            mlog.info('Root (MRPE) action-wise average: {} mm'.format(round(np.mean(errors_root), 1)))
"""
Power Flow Analysis: Support Functions
Created By:
<NAME>
<NAME>
"""
import numpy as np
from numpy.linalg import inv
import pandas as pd
"""
Imports Bus and line data from excel sheets
Takes in an array containing ['File Location', 'Sheet Name']
Returns two panda data frames for the bus and line data
"""
def import_BusAndLineData(BusData_Location, LineData_Location):
BusData = pd.read_excel(BusData_Location[0], sheet_name=BusData_Location[1])
LineData = pd.read_excel(LineData_Location[0], sheet_name=LineData_Location[1])
return BusData, LineData
"""
Builds G and B matrices to be used in Power Flow calculations
Takes in data frame containing all line information, and number of busses in system
Returns G and B arrays
"""
def build_AdmittanceMatrix(LineData, sys_Size):
col = np.array(LineData.columns)
line_From = np.array(LineData[col[0]])
line_To = np.array(LineData[col[1]])
line_R = np.array(LineData[col[2]])
line_X = np.array(LineData[col[3]])
line_Z = np.array(LineData[col[2]]) + 1j*np.array(LineData[col[3]])
line_Y = 1/line_Z
line_B = np.array(LineData[col[4]])
line_Fmax = np.array(LineData[col[5]])
sys_Y = np.array([[0 for j in range(sys_Size)] for i in range(sys_Size)], dtype = complex)
sys_G = np.zeros((sys_Size, sys_Size))
sys_B = np.zeros((sys_Size, sys_Size))
    # assemble each Y-bus entry in turn
for i in range(sys_Size): #Row
for j in range(sys_Size): #Column
if i==j: # Diagonal, sum of Y(From==i || To==i) + .5B(From==i || To ==i)
sys_Y[i][j] = np.sum(line_Y[np.array(line_From==i+1) + np.array(line_To==i+1)]) \
+.5j*np.sum(line_B[np.array(line_From==i+1) + np.array(line_To==i+1)])
elif i<j: #Non Diagonal, -Y(From==i && To==j)
sys_Y[i][j] = -np.sum(line_Y[np.multiply(np.array(line_From==i+1), np.array(line_To==j+1))])
else: #i>j =[j][i]
sys_Y[i][j] = sys_Y[j][i]
sys_G = sys_Y.real
sys_B = sys_Y.imag
return sys_Y, sys_G, sys_B
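# Minimal usage sketch (assumes the positional column layout read above:
# From, To, R, X, B, Fmax). The single two-bus line is illustrative only.
def _demo_build_AdmittanceMatrix():
    demo_lines = pd.DataFrame({'From': [1], 'To': [2], 'R': [0.01],
                               'X': [0.1], 'B': [0.02], 'Fmax': [100.0]})
    sys_Y, sys_G, sys_B = build_AdmittanceMatrix(demo_lines, 2)
    # Off-diagonals hold -y_12; each diagonal adds half the line charging B.
    return sys_Y, sys_G, sys_B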
"""
Parses initial bus information from data
Takes in Bus Data data frame
Returns sys_:
LoadP - active power consumed at node
LoadQ - reactive power consumed at node
BusType - type of bus<(S)lack, (G)enerator, (D)rain>
PGen - Active Power produced by each generator node
VRef - Reference voltages at PV busses
"""
def init_BusData(BusData):
col = np.array(BusData.columns)
sys_BusNum = np.array(BusData[col[0]])
sys_LoadP = np.array(BusData[col[1]])
sys_LoadQ = np.array(BusData[col[2]])
sys_BusType = np.array(BusData[col[3]])
sys_PGen = np.array(BusData[col[4]])
sys_VRef = np.array(BusData[col[5]])
return sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef
"""
Initializes System Data for processing
Takes in sys_:
LoadP - active power consumed at node
LoadQ - reactive power consumed at node
BusType - type of bus<(S)lack, (G)enerator, (D)rain>
PGen - Active Power produced by each generator node
VRef - Reference voltages at PV busses
Returns a 2D array containing each buses's current information
[i,:] - Bus i's information
[:,0] - Bus #
[:,1] - Voltage (V)
[:,2] - Angle (T)
[:,3] - Active Power (P_inj)
[:,4] - P(T,V)-P_inj (mismatch)
[:,5] - Reactive Power (Q_inj)
[:,6] - Q(T,V)-Q_inj (mismatch)
"""
def init_SysData(sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef, sys_G, sys_B, S_Base):
n= sys_LoadP.size
sys_Data = np.zeros((n,7))
sys_Data[:,0] = sys_BusNum
sys_Data[:,1] = sys_VRef #Sets initial voltages to provided reference
sys_Data[:,2] = np.zeros(n) #Sets initial angles to zero
sys_Data[:,3] = (sys_PGen-sys_LoadP)/S_Base #Sets initial power inject to Bus generation minus load in per unit
sys_Data[sys_BusType=='S',3] = (np.sum(sys_LoadP)-np.sum(sys_PGen))/S_Base #Sets initial guess for active power required from slack bus
sys_Data[:,5] = (-sys_LoadQ)/S_Base #Sets initial power inject to Bus generation minus load in per unit
sys_Data[sys_BusType=='S',5] = (-np.sum(sys_LoadQ))/S_Base #Sets initial guess for reactive power required from slack bus
for i in range(n): #Sets initial mismatch to calculated power from (V,T) minus expected inject
sys_Data[i,4] = -sys_Data[i,3]
sys_Data[i,6] = -sys_Data[i,5]
for j in range(n):
sys_Data[i,4] += sys_Data[i,1]*sys_Data[j,1]*\
(sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2])+\
sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))
sys_Data[i,6] += sys_Data[i,1]*sys_Data[j,1]*\
(sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2])-\
sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))
return sys_Data
"""
Determines Jacobian value for a given J_11 cell (dP/dT)
Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_11(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = -Q_i - B_ij*(V_i**2)
else:
J_ij = V_i*V_j*(G_ij*np.sin(T_i-T_j)-B_ij*np.cos(T_i-T_j))
return J_ij
"""
Determines Jacobian value for a given J_12 cell (dP/dV)
Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_12(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = (P_i/V_i) + G_ij*V_i
else:
J_ij = V_i*(G_ij*np.cos(T_i-T_j)+B_ij*np.sin(T_i-T_j))
return J_ij
"""
Determines Jacobian value for a given J_21 cell (dQ/dT)
Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_21(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = P_i-G_ij*(V_i**2)
else:
J_ij = -V_i*V_j*(G_ij*np.cos(T_i-T_j)+B_ij*np.sin(T_i-T_j))
return J_ij
"""
Determines Jacobian value for a given J_22 cell (dQ/dV)
Takes in: i, j, n, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_22(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = (Q_i/V_i)-B_ij*V_i
else:
J_ij = V_i*(G_ij*np.sin(T_i-T_j)-B_ij*np.cos(T_i-T_j))
return J_ij
"""
Processes 1 iteration of current system data
Takes in sys_Data, a 2D array containing each node's current information
[0] - Bus #
[1] - Voltage (V)
[2] - Angle (T)
[3] - Active Power (P_inj)
[4] - P(T,V)-P_inj (mismatch)
[5] - Reactive Power (Q_inj)
[6] - Q(T,V)-Q_inj (mismatch)
As well as, the systems G and B matrices, and node types
Returns the updated array
"""
def update_SysData(sys_Data, sys_G, sys_B, sys_BusType):
n = sys_BusType.size
D_index = sys_BusType=='D'
G_index = sys_BusType=='G'
S_index = sys_BusType=='S'
"""Determine Jacobian"""
J = np.zeros((2*n,2*n))
for i in range(n):
for j in range(n): #(i, j, V_i, V_j, T_i, T_j, P_i(T,V), Q_i(T,V), G_ij, B_ij)
J[i,j] = Jacobian_PowerFlow_11(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i,j+n] = Jacobian_PowerFlow_12(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i+n,j] = Jacobian_PowerFlow_21(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i+n,j+n] =Jacobian_PowerFlow_22(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
"""Remove non-implicit values"""
for i in range(n-1,-1,-1):
if S_index[i]:
J=np.delete(J, i+n, 0)
J=np.delete(J, i+n, 1)
J=np.delete(J, i, 0)
J=np.delete(J, i, 1)
elif G_index[i]:
J=np.delete(J, i+n, 0)
J=np.delete(J, i+n, 1)
"""Determine Inverse"""
J_inv = inv(J)
"""Determine Delta T,V"""
PQ = np.concatenate((sys_Data[np.invert(S_index), 4], sys_Data[D_index, 6]))
Delta = -J_inv @ PQ
Delta_T = Delta[0:sum(np.invert(S_index))]
Delta_V = Delta[sum(np.invert(S_index)):sum(np.invert(S_index))+sum(D_index)]
"""Update T for non-slack buses, and V for PQ buses"""
Delta_T_index = 0
Delta_V_index = 0
for i in range(n):
if G_index[i]:
sys_Data[i,2] += Delta_T[Delta_T_index]
Delta_T_index += 1
elif D_index[i]:
sys_Data[i,1] += Delta_V[Delta_V_index]
Delta_V_index += 1
sys_Data[i,2] += Delta_T[Delta_T_index]
Delta_T_index += 1
"""Update P_inj for slack bus, and Q_inj for non PQ buses"""
for i in range(n):
if S_index[i]:#Update Slack P_inj
sys_Data[i,3] = 0
if (S_index[i] or G_index[i]):#Update non PQ Q_inj
sys_Data[i,5] = 0
for j in range(n):
if S_index[i]:#Update Slack
sys_Data[i,3] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))+(sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2])))
if (S_index[i] or G_index[i]):#Update non PQ
sys_Data[i,5] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))-(sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2])))
"""Update mismatch columns"""
for i in range(n):
sys_Data[i,4] = -sys_Data[i,3]
sys_Data[i,6] = -sys_Data[i,5]
for j in range(n):
sys_Data[i,4] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))+(sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2])))
sys_Data[i,6] += sys_Data[i,1]*sys_Data[j,1]*((sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))-(sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2])))
return sys_Data
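# Hypothetical Newton-Raphson driver (not part of the original module):
# repeatedly apply update_SysData until the largest active/reactive mismatch
# (columns 4 and 6 of sys_Data) falls below a tolerance.
def _solve_PowerFlow(sys_Data, sys_G, sys_B, sys_BusType, tol=1e-4, max_iter=20):
    for _ in range(max_iter):
        sys_Data = update_SysData(sys_Data, sys_G, sys_B, sys_BusType)
        if np.max(np.abs(sys_Data[:, [4, 6]])) < tol:
            break
    return sys_Data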
"""
Takes in voltage and theta values, shunt capacitance, and the admittance matrix
Returns Power Values:
S_ij - Apparent Power
P_ij - Real Power
Q_ij - Reactive Power
"""
def PowerFlow (V_i,T_i,V_j,T_j,B_tot,y_ij):
    I_ij = y_ij * (V_i * np.cos(T_i) + 1j * V_i * np.sin(T_i)
                   - (V_j * np.cos(T_j) + 1j * V_j * np.sin(T_j))) \
           + 1j * (B_tot / 2) * (V_i * np.cos(T_i) + 1j * V_i * np.sin(T_i))
    # The source was truncated here; the remainder is a reconstruction (an
    # assumption) from the docstring and the standard pi-model line-flow
    # relations: S_ij = V_i * conj(I_ij), P_ij = Re(S_ij), Q_ij = Im(S_ij).
    S_ij = (V_i * np.cos(T_i) + 1j * V_i * np.sin(T_i)) * np.conj(I_ij)
    P_ij = S_ij.real
    Q_ij = S_ij.imag
    return S_ij, P_ij, Q_ij
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import csv
import sys
sys.path.append("..\\..\\libraries")
from GraphicLibrary import drawPoint, drawCircle, initDrawing, finishDrawing
from GeometryLibrary import getPoint, angle_radians
filename = "out-dist-0,40m_B-complete.txt"
# Load data
data = list()
with open(filename) as csvDataFile:
csvReader = csv.reader(csvDataFile, delimiter=';')
for row in csvReader:
data.append(row)
data = data[1:]
# Display results
initDrawing(figsize=(16,8))
import matplotlib.cm as cm
from matplotlib.colors import Normalize
cmap = cm.Reds #cm.autumn
norm = Normalize(vmin=0, vmax=1)
error1 = list()
error2 = list()
for d in data:
error1.append(float(d[7]))
error2.append(float(d[11]))
errmax1 = np.max(error1)
import numpy as np
np.seterr(all='raise')
import tensorflow as tf
import pandas as pd
class Episode:
def __init__(self, observation, observation_enc, goal, reward):
self.length = 1
self.observations = [observation]
self.observations_enc = [observation_enc]
self.rewards = [reward]
self.actions = []
self.goal = goal
def append(self, action, observation, observation_enc, reward):
self.length += 1
self.actions.append(action)
self.observations.append(observation)
self.observations_enc.append(observation_enc)
self.rewards.append(reward)
class Agent:
def __init__(self, env):
self.env = env
def act(self, observation, goal, greedy):
raise NotImplementedError()
def interact(self, n_episodes=1, greedy=False, render=False):
episodes = []
for i in range(n_episodes):
observation, observation_enc, goal, reward = self.env.reset()
episodes.append(
Episode(
observation,
observation_enc,
goal,
reward))
if render:
print('Episode {0}.\n'.format(i + 1))
self.env.render()
done = False
while not done:
action = self.act(observation, goal, greedy)
observation, observation_enc, reward, done = self.env.step(
action)
episodes[-1].append(action, observation,
observation_enc, reward)
if render:
self.env.render()
return episodes
class RandomAgent(Agent):
def __init__(self, env):
Agent.__init__(self, env)
def act(self, observation, goal, greedy):
return np.random.choice(self.env.n_actions)
class BatchVariables:
def __init__(self, d_observations, d_goals, n_actions):
self.d_observations = np.reshape(d_observations, -1).tolist()
self.d_goals = np.reshape(d_goals, -1).tolist()
self.n_actions = n_actions
self.lengths = tf.placeholder(tf.int32, [None], name='lengths')
self.actions = tf.placeholder(tf.int32, [None, None], name='actions')
self.actions_enc = tf.one_hot(self.actions, self.n_actions)
self.observations = tf.placeholder(tf.float32, [None, None] +
self.d_observations,
name='observations')
self.rewards = tf.placeholder(tf.float32, [None, None], name='rewards')
self.baselines = tf.placeholder(tf.float32, [None, None],
name='baselines')
self.goals = tf.placeholder(tf.float32, [None] + self.d_goals,
name='goals')
self.batch_size = tf.shape(self.observations)[0]
self.max_steps = tf.shape(self.observations)[1]
self.goals_enc = tf.tile(self.goals, [1, self.max_steps])
self.goals_enc = tf.reshape(self.goals_enc, [-1, self.max_steps] +
self.d_goals)
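# Shape sketch (illustrative NumPy analogue of the tf.tile/reshape above,
# assuming a flat goal of dimension d): a (batch, d) goal matrix is repeated
# across time to (batch, max_steps, d) so every timestep sees the same goal.
def _tile_goals_numpy(goals, max_steps):
    return np.repeat(goals[:, np.newaxis, :], max_steps, axis=1)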
class PolicyNetworkAgent(Agent):
def __init__(self, env, hidden_layers, learning_rate, baseline,
use_vscaling_init, init_session, use_gpu):
Agent.__init__(self, env)
self.hidden_layers = hidden_layers
self.learning_rate = learning_rate
self.baseline = baseline
self.use_vscaling_init = use_vscaling_init
self.create_network()
self.setup()
self.use_gpu = use_gpu
self.saver = tf.train.Saver()
if init_session:
self.init()
def create_network(self):
self.bvars = BatchVariables(self.env.d_observations, self.env.d_goals,
self.env.n_actions)
if len(self.bvars.d_observations) == 1:
self.create_network_1d()
else:
self.create_network_conv()
def create_network_1d(self):
d_input = self.bvars.d_observations[0] + self.bvars.d_goals[0]
inputs = tf.concat(
[self.bvars.observations, self.bvars.goals_enc], axis=2)
output = tf.reshape(inputs, [-1, d_input])
self.variables = []
layers = [d_input] + self.hidden_layers +\
[self.bvars.n_actions]
for i in range(1, len(layers)):
if self.use_vscaling_init:
W_init = tf.variance_scaling_initializer(
mode="fan_avg", distribution="uniform")
W = tf.get_variable('pW_{0}'.format(
i), [layers[i - 1], layers[i]], initializer=W_init)
b = tf.get_variable('pb_{0}'.format(i), layers[i],
initializer=tf.constant_initializer(0.0))
else:
W = tf.Variable(tf.truncated_normal([layers[i - 1], layers[i]],
stddev=0.01),
name='pW_{0}'.format(i))
b = tf.Variable(tf.zeros(layers[i]), name='pb_{0}'.format(i))
self.variables += [W, b]
output = tf.matmul(output, W) + b
if i < len(layers) - 1:
output = tf.tanh(output)
else:
# Note: Probabilities lower bounded by 1e-12
output = tf.nn.softmax(output) + 1e-12
self.policy = tf.reshape(output, [-1, self.bvars.max_steps,
self.bvars.n_actions])
def create_network_conv(self):
n_filters = [32, 64, 64]
filter_sizes = [8, 4, 3]
strides = [4, 2, 1]
hidden_layer_sizes = self.hidden_layers
assert isinstance(hidden_layer_sizes, list)
reshaped_observations = tf.reshape(self.bvars.observations,
[-1] + self.bvars.d_observations)
reshaped_observations = reshaped_observations / 255
with tf.device('/cpu:0'):
with tf.variable_scope("state_processor"):
prev_y = tf.image.resize_images(
reshaped_observations, (84, 84), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
with tf.variable_scope("actor"):
for i, (fs, ks, strides) in enumerate(
zip(n_filters, filter_sizes, strides)):
init = tf.variance_scaling_initializer(
mode="fan_avg", distribution="uniform")
prev_y = tf.layers.conv2d(prev_y, fs, ks,
strides=strides,
activation=tf.tanh,
name="rep_conv_" + str(i),
kernel_initializer=init)
obs_enc_flat = tf.contrib.layers.flatten(prev_y)
goals_reshaped = tf.reshape(self.bvars.goals_enc,
[-1] + self.bvars.d_goals)
inputs = tf.concat([obs_enc_flat, goals_reshaped], axis=1)
prev_y = inputs
for i, layer_size in enumerate(hidden_layer_sizes):
init = tf.variance_scaling_initializer(
mode="fan_avg", distribution="uniform")
prev_y = tf.layers.dense(prev_y, layer_size,
activation=tf.tanh,
kernel_initializer=init,
name="rep_dense_" + str(i))
self.policy = tf.layers.dense(
prev_y, self.bvars.n_actions, name="op", activation=None)
self.policy = tf.nn.softmax(self.policy) + 1e-12
self.policy = tf.reshape(self.policy, [-1, self.bvars.max_steps,
self.bvars.n_actions])
# self.variables = tf.trainable_variables()
self.variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='actor')
def feed(self, episodes):
feed = {}
lengths = np.array([e.length for e in episodes], dtype=np.int32)
batch_size = len(episodes)
max_steps = max(lengths)
actions = np.zeros((batch_size, max_steps - 1), dtype=np.int32)
observations = np.zeros([batch_size, max_steps] +
self.bvars.d_observations, dtype=np.float32)
rewards = np.zeros((batch_size, max_steps), dtype=np.float32)
baselines = np.zeros((batch_size, max_steps - 1), dtype=np.float32)
for i in range(batch_size):
actions[i, :lengths[i] - 1] = episodes[i].actions
for j in range(lengths[i]):
observations[i, j] = episodes[i].observations[j]
rewards[i, :lengths[i]] = episodes[i].rewards
v = self.baseline.evaluate(episodes[i], episodes[i].goal)
baselines[i, :lengths[i] - 1] = v
feed[self.bvars.lengths] = lengths
feed[self.bvars.actions] = actions
feed[self.bvars.observations] = observations
feed[self.bvars.rewards] = rewards
feed[self.bvars.baselines] = baselines
feed[self.bvars.goals] = np.array([e.goal for e in episodes],
dtype=np.float32)
return feed
def init(self):
if self.use_gpu:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
config = tf.ConfigProto(gpu_options=gpu_options)
else:
config = tf.ConfigProto(device_count={'GPU': 0})
self.session = tf.Session(config=config)
self.session.run(tf.global_variables_initializer())
self.baseline.reset(self.session)
def close(self):
self.session.close()
def act(self, observation, goal, greedy):
goal = np.array([goal], dtype=np.float32)
observation = np.array([[observation]], dtype=np.float32)
feed = {self.bvars.lengths: [1], self.bvars.goals: goal,
self.bvars.observations: observation}
policy = self.session.run(self.policy, feed)[0][0]
if greedy:
return np.argmax(policy)
a = np.random.choice(self.bvars.n_actions, p=policy)
return a
def train(self, n_batches, batch_size, eval_freq, eval_size):
ereturns = []
treturns = []
plosses = []
blosses = []
print('Training for {0} batches.'.format(n_batches))
episodes = self.interact(eval_size, greedy=True, render=False)
ereturns.append(np.mean([np.sum(e.rewards) for e in episodes]))
print('Batch 0. Return (e): {0:.3f}.'.format(ereturns[-1]))
for t in range(1, n_batches + 1):
episodes = self.interact(batch_size, greedy=False, render=False)
treturns.append(np.mean([np.sum(e.rewards) for e in episodes]))
train_losses = self.train_step(episodes)
if isinstance(train_losses, tuple):
# HAC
plosses.append(train_losses[0])
blosses.append(train_losses[1])
else:
plosses.append(train_losses)
blosses.append(self.baseline.train_step(episodes))
if t % eval_freq == 0:
episodes = self.interact(eval_size, greedy=True, render=False)
ereturns.append(np.mean([np.sum(e.rewards) for e in episodes]))
msg = 'Batch {0}. Return (e): {1:.3f}. Return (t): {2:.3f}. '
msg += 'Policy loss (t): {3:.3f}. Baseline loss (t): {4:.3f}.'
aret = np.mean(treturns[-eval_freq:])
aploss = np.mean(plosses[-eval_freq:])
abloss = np.mean(blosses[-eval_freq:])
print(msg.format(t, ereturns[-1], aret, aploss, abloss))
        ereturns, treturns = np.array(ereturns), np.array(treturns)
import os
import timeit
from typing import List
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from scipy.special import gamma
import arch.univariate.recursions_python as recpy
CYTHON_COVERAGE = os.environ.get("ARCH_CYTHON_COVERAGE", "0") in ("true", "1", "True")
try:
import arch.univariate.recursions as rec_cython
missing_extension = False
except ImportError:
missing_extension = True
if missing_extension:
rec = recpy
else:
rec = rec_cython
try:
import numba # noqa
missing_numba = False
except ImportError:
missing_numba = True
pytestmark = pytest.mark.filterwarnings("ignore::arch.compat.numba.PerformanceWarning")
class Timer(object):
def __init__(
self,
first,
first_name,
second,
second_name,
model_name,
setup,
repeat=5,
number=10,
) -> None:
self.first_code = first
self.second_code = second
self.setup = setup
self.first_name = first_name
self.second_name = second_name
self.model_name = model_name
self.repeat = repeat
self.number = number
self._run = False
self.times: List[float] = []
self._codes = [first, second]
self.ratio = np.inf
def display(self):
if not self._run:
self.time()
self.ratio = self.times[0] / self.times[1]
title = self.model_name + " timing"
print("\n" + title)
print("-" * len(title))
print(self.first_name + ": " + "{:0.3f} ms".format(1000 * self.times[0]))
print(self.second_name + ": " + "{:0.3f} ms".format(1000 * self.times[1]))
if self.ratio < 1:
print(
"{0} is {1:0.1f}% faster".format(
self.first_name, 100 * (1 / self.ratio - 1)
)
)
else:
print(
"{0} is {1:0.1f}% faster".format(
self.second_name, 100 * (self.ratio - 1)
)
)
print(
self.first_name
+ "/"
+ self.second_name
+ " Ratio: {:0.3f}\n".format(self.ratio)
)
def time(self):
self.times = []
for code in self._codes:
timer = timeit.Timer(code, setup=self.setup)
self.times.append(min(timer.repeat(self.repeat, self.number)))
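# Hypothetical usage of Timer (the two statements compared here are trivial
# placeholders; in this module the real comparisons pit the Cython rec.*
# recursions against the pure-Python recpy.* versions):
def _demo_timer():
    t = Timer("sum(range(100))", "first", "max(range(100))", "second",
              "demo", setup="pass", repeat=2, number=5)
    t.display()
    return t.ratio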
class TestRecursions(object):
@classmethod
def setup_class(cls):
cls.nobs = 1000
cls.rng = RandomState(12345)
cls.resids = cls.rng.standard_normal(cls.nobs)
cls.sigma2 = np.zeros_like(cls.resids)
var = cls.resids.var()
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
cls.var_bounds = np.ones((cls.nobs, 2)) * var_bounds
cls.backcast = 1.0
cls.timer_setup = """
import numpy as np
import arch.univariate.recursions as rec
import arch.univariate.recursions_python as recpy
nobs = 10000
resids = np.random.standard_normal(nobs)
sigma2 = np.zeros_like(resids)
var = resids.var()
backcast = 1.0
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
var_bounds = np.ones((nobs, 2)) * var_bounds
"""
def test_garch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([0.1, -0.4, 0.3, 0.2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 3, 2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 0.3, 0.2])
mod_fresids = fresids.copy()
mod_fresids[:1] = np.inf
recpy.garch_recursion_python(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.garch_recursion(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
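    # Reference sketch (an assumption about the recursion under test): with
    # p=o=q=1 and parameters [omega, alpha, gamma, beta], garch_recursion
    # implements the standard GJR-GARCH(1,1,1) update
    #   sigma2[t] = omega + alpha*e[t-1]^2 + gamma*e[t-1]^2*I(e[t-1]<0)
    #               + beta*sigma2[t-1],
    # started from the backcast value (the library's exact backcast treatment
    # of the asymmetric term may differ). _gjr_reference is illustrative only.
    @staticmethod
    def _gjr_reference(params, resids, backcast):
        omega, alpha, gamma, beta = params
        sigma2 = np.empty_like(resids)
        for t in range(resids.shape[0]):
            lag_var = backcast if t == 0 else sigma2[t - 1]
            lag_res2 = backcast if t == 0 else resids[t - 1] ** 2
            neg = 0.5 if t == 0 else float(resids[t - 1] < 0)
            sigma2[t] = omega + alpha * lag_res2 + gamma * lag_res2 * neg + beta * lag_var
        return sigma2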
def test_harch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
lags = np.array([1, 5, 22], dtype=np.int32)
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.harch_recursion_python(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.harch_recursion(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_arch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
p = 3
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
        assert np.all(sigma2 >= self.var_bounds[:, 0])
        assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
# Select and shuffle a random subset of available data, and apply data augmentation techniques.
# I tried adding a separate thread to fill a queue of unzipped spectrograms,
# but it didn't actually improve performance.
import random
import sys
import numpy as np
import skimage
from skimage import filters
from core import audio
from core import constants
from core import util
from core import plot
PROB_MERGE = 0.2 # probability of merging to train multi-label support
PROB_AUG = 0.55 # probability of augmentation
CACHE_LEN = 1000 # cache this many noise specs for performance
MAX_SHIFT = 5 # max pixels for horizontal shift
NOISE_VARIANCE = 0.0015 # larger variances lead to more noise
SPECKLE_VARIANCE = .009
# relative frequencies of the augmentation types
BLUR_INDEX = 0 # so FREQS[0] is relative frequency of blur
FADE_INDEX = 1
LOW_INDEX = 2
NOISE_INDEX = 3
SHIFT_INDEX = 4
SPECKLE_INDEX = 5
FREQS = [0.25, 0.5, 0.2, 1.0, 0.5, 0.9]
class DataGenerator():
def __init__(self, x_train, y_train, seed=None, augmentation=True, binary_classifier=False, multilabel=False):
self.x_train = x_train
self.y_train = y_train
self.seed = seed
self.augmentation = augmentation
self.binary_classifier = binary_classifier
self.multilabel = multilabel
if binary_classifier:
self.spec_height = constants.BINARY_SPEC_HEIGHT
else:
self.spec_height = constants.SPEC_HEIGHT
self.indices = np.arange(y_train.shape[0])
if self.augmentation:
# convert relative frequencies to probability ranges in [0, 1]
freqs = np.array(FREQS)
            freq_sum = np.sum(freqs)
            probs = freqs / freq_sum
self.probs = np.zeros(SPECKLE_INDEX + 1)
self.probs[0] = probs[0]
for i in range(1, SPECKLE_INDEX + 1):
self.probs[i] = self.probs[i - 1] + probs[i]
# self.local_vars is used in _add_low_noise;
# use exponentially more noise in the low frequencies
self.local_vars = np.zeros((self.spec_height, constants.SPEC_WIDTH, 1))
for row in range(0, self.spec_height):
max_val = ((self.spec_height - row) ** 4 / self.spec_height ** 4) / 70
for col in range(0, constants.SPEC_WIDTH):
self.local_vars[row, col, 0] = np.random.uniform(0.001, max_val)
# creating these here instead of during augmentation saves a lot of time
self.noise = np.zeros((CACHE_LEN, self.spec_height, constants.SPEC_WIDTH, 1))
for i in range(CACHE_LEN):
self.noise[i] = skimage.util.random_noise(self.noise[i], mode='gaussian', seed=self.seed, var=NOISE_VARIANCE, clip=True)
self.low_noise = np.zeros((CACHE_LEN, self.spec_height, constants.SPEC_WIDTH, 1))
for i in range(CACHE_LEN):
self.low_noise[i] = skimage.util.random_noise(self.low_noise[i], mode='localvar', seed=self.seed, local_vars=self.local_vars, clip=True)
            self.speckle = np.zeros((CACHE_LEN, self.spec_height, constants.SPEC_WIDTH, 1))
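# Sketch (a hypothetical helper, not part of DataGenerator): how cumulative
# ranges like self.probs are typically consumed — draw u ~ U(0, 1) and return
# the first augmentation index whose cumulative bound reaches u.
def _pick_augmentation(cum_probs):
    u = np.random.uniform()
    for idx, upper in enumerate(cum_probs):
        if u <= upper:
            return idx
    return len(cum_probs) - 1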
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
import math
import numpy as np
import random
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
round_up=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
self.round_up = round_up
if self.round_up:
self.total_size = self.num_samples * self.num_replicas
else:
self.total_size = len(self.dataset)
# added to adapt PK sampling strategy
self.do_pk = hasattr(dataset, "K")
if self.do_pk:
if self.rank == 0:
print("Start using PK sampling strategy!")
self.spkr_dataset_ids = dataset.spkr_dataset_ids
self.K = dataset.K
self.P = dataset.P
self.batch_size = self.P*self.K
def __iter__(self):
if not self.do_pk:
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
if self.round_up:
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
if self.round_up:
assert len(indices) == self.num_samples
return iter(indices)
else:
            lol = lambda lst, sz: [lst[i:i + sz] for i in range(0, len(lst), sz)]  # split a list into sz-sized chunks
items = list(self.spkr_dataset_ids.items())
# metric learning naturally needs shuffle to be True
g = torch.Generator()
g.manual_seed(self.epoch)
flattened_list = []
flattened_label = []
for spkr, ids in items:
numSeg = (len(ids) // self.K) * self.K
rp = lol(torch.randperm(len(ids), generator=g).tolist()[:numSeg], self.K)
flattened_label.extend([spkr]*len(rp))
for indices in rp:
flattened_list.append([ids[i] for i in indices])
mixid = torch.randperm(len(flattened_label), generator=g).tolist()
mixlabel = []
mixmap = []
assert self.batch_size % self.K == 0, \
"batchsize %d should be exactly divided by K %d" % (self.batch_size, self.K)
tuple_batch_size = self.batch_size // self.K
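            # Greedy batch packing (consumed by the loop below): walk the
            # shuffled tuple list and admit a tuple only when its speaker is
            # absent from the batch currently being filled, so each P*K batch
            # holds P distinct speakers with K samples apiece; tuples whose
            # speaker already appears in the current batch are skipped.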
for ii in mixid:
startbatch = len(mixlabel) - len(mixlabel) % tuple_batch_size
if flattened_label[ii] not in mixlabel[startbatch:]:
mixlabel.append(flattened_label[ii])
mixmap.append(ii)
all_indices = []
for idx in mixmap:
all_indices.extend(flattened_list[idx])
round_len = (len(all_indices) // (self.num_replicas * self.batch_size)) * self.batch_size
sub_indices = all_indices[self.rank * round_len: (self.rank+1) * round_len]
# since round_len is definitely a bit smaller than the original len,
# to complement the original length, some chunks will be oversampled randomly
if self.round_up:
epoch_iter = math.ceil(self.total_size / (self.batch_size * self.num_replicas))
truncated_iter = round_len // self.batch_size
sub_indices = np.asarray(sub_indices)
                split_batches = np.split(sub_indices, truncated_iter)
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines3.common.logger import Logger as log
import random
from copy import deepcopy
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
buy_cost_pct,
sell_cost_pct,
reward_scaling,
state_space,
action_space,
mode='',
out_of_cash_penalty=0.01,
cash_limit=0.1):
        self.df = df  # market data
        self.stock_dim = stock_dim  # number of stocks
        self.hmax = hmax  # max shares traded per day
        self.initial_amount = initial_amount  # starting capital
        self.buy_cost_pct = buy_cost_pct  # buy-side transaction cost
        self.sell_cost_pct = sell_cost_pct  # sell-side transaction cost
        self.reward_scaling = reward_scaling  # reward scaling factor
        self.state_space = state_space  # state dimensionality
        self.action_space = action_space  # action dimensionality
        self.mode = mode  # 'test' or 'train'
        self.out_of_cash_penalty = out_of_cash_penalty  # penalty for running low on cash
        self.cash_limit = cash_limit  # minimum cash fraction of assets
###################################################################################
self.action_space = spaces.Box(low = -1, high = 1,shape = (self.action_space,))
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space,))
####################################################################################
        self.day_start = 0  # start day index
        self.day = self.day_start  # current day index
        self.cash = self.initial_amount  # cash on hand
        self.holds = [0]*self.stock_dim  # holdings per stock
        self.cost = 0
        self.count_0 = 0  # count of no-op steps, tracked to improve sampling quality
self.actions_memory=[]
self.date_memory=[]
self.asset_memory=[]
def reset(self):
if self.mode == 'train':
            n_days = len(self.df.date.unique())
            length = int(n_days * 0.95)
day_start = random.choice(range(length))
self.day_start = 0
else:
self.day_start = 0
print("day_start {0}".format(self.day_start))
self.day = self.day_start
        self.cash = self.initial_amount  # cash; when training this could be randomized within a domain-dependent range
self.holds = [0]*self.stock_dim #持仓
self.cost = 0
self.count_0 = 0
self.actions_memory=[]
self.date_memory=[]
self.asset_memory=[]
self.cash_memory = []
self.date_memory.append(self._get_date())
self.asset_memory.append(self.cash)
self.cash_memory.append(self.cash)
#if self.mode == 'train':
#self._initial_cash_and_buy_()
state = self._update_state()
return state
def _initial_cash_and_buy_(self):
"""Initialize the state, already bought some"""
data = self.df.loc[self.day, :]
'''
cash_max = max(data.cash_max)
cash_min = min(data.cash_min)
if cash_max > 10000*10:
cash_max = 10000*10
if cash_min < 10000*0.1:
cash_min = 10000*0.1
cash_u = random.uniform(cash_min, cash_max)
self.cash = self.initial_amount/10000 * cash_u
'''
prices = data.close.values.tolist()
avg_price = sum(prices)/len(prices)
        ran = random.random()  # random buy: since the start day is random, initial_amount could be too; a new field giving the current cash range would let us sample within it
buy_nums_each_tic = ran*self.cash//(avg_price*len(prices)) # only use half of the initial amount
buy_nums_each_tic = buy_nums_each_tic//100*100
cost = sum(prices)*buy_nums_each_tic
self.cash = self.cash - cost
self.holds = [buy_nums_each_tic]*self.stock_dim
'''
state = [self.initial_amount-cost] + \
self.data.close.values.tolist() + \
[buy_nums_each_tic]*self.stock_dim + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
'''
def step(self, actions):
#print('step')
#actions = actions * self.hmax #actions initially is scaled between 0 to 1
#actions = (actions.astype(int)) #convert into integer because we can't by fraction of shares
actions_old = None
if self.mode == 'test':
actions_old = actions.copy()
begin_total_asset = self._update_total_assets()
stocks_can_buy = self._get_can_buy()
stocks_can_sell = -np.array(self.holds)
base_ = np.array([-1]*self.stock_dim)
actions = (actions - base_)/2*(stocks_can_buy - stocks_can_sell)+stocks_can_sell
        argsort_actions = np.argsort(actions)  # sort action indices
        sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]  # indices of stocks to sell
        buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]  # indices of stocks to buy
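# Worked sketch of the affine action rescaling in step() above (numbers are
# illustrative): an action of -1 maps to selling the entire holding, +1 to
# the maximum affordable buy, and 0 to the midpoint of the two bounds.
def _rescale_action_demo():
    can_sell, can_buy = -100.0, 250.0  # hypothetical share bounds
    for a in (-1.0, 0.0, 1.0):
        shares = (a - (-1.0)) / 2.0 * (can_buy - can_sell) + can_sell
        print(a, '->', shares)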
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import scipy as sp
sns.set(style="ticks")
def theta_pairs(samples_dict,design_names=None,native=False,lims=None,theta_ref=None,save=None):
"""
Create pairs plot of sampled thetas.
:param dict samples_dict: samples from model.get_samples()
:param list/NoneType design_names: list of string names for thetas, optional (None will use default names)
:param bool native: put theta on native scale? (note: you likely want to pass lims in this case)
:param list lims: list of tuples, limits for each theta value for plotting; defaults to [0, 1] if native=False
:param list theta_ref: scalar reference values to plot as vlines on histplots and as red dots on bivariate plots
:param str save: file name to save plot
:returns: matplotlib figure
"""
if 'theta' not in samples_dict.keys():
print('No thetas to plot')
return
if native is False:
theta = samples_dict['theta']
else:
theta = samples_dict['theta_native']
n_samp, n_theta = theta.shape
if native is False and lims is None:
lims = [(0, 1) for i in range(n_theta)]
if isinstance(design_names, list) and len(design_names) != n_theta:
raise ValueError('Design names wrong length')
if design_names is None:
design_names = ['theta_%d' % (i+1) for i in range(n_theta)]
thin_idx = np.linspace(0,n_samp-1,np.min([n_samp-1, 1000]),dtype=int) # thin to at most 1000 samples
theta_df = pd.DataFrame(theta[thin_idx,:],columns=design_names) # take only 1000 samples to dataframe
theta_df.insert(0,'idx',theta_df.index,allow_duplicates = False)
if theta_df.shape[1]>2:
g = sns.PairGrid(theta_df.loc[:, theta_df.columns != 'idx'], diag_sharey=False);
g.map_upper(sns.scatterplot, palette = 'coolwarm', hue=theta_df['idx'], legend=False);
g.map_lower(sns.kdeplot, cmap="viridis", fill=True, thresh=0.05);
g.map_diag(sns.histplot, kde=True);
if lims is not None:
# Undo sharing of axes
for i in range(n_theta):
[g.diag_axes[i].get_shared_x_axes().remove(axis) for axis in g.axes.ravel()];
for j in range(n_theta):
[g.axes[i, j].get_shared_x_axes().remove(axis) for axis in g.axes.ravel()];
[g.axes[i, j].get_shared_y_axes().remove(axis) for axis in g.axes.ravel()];
[g.axes[i, j].get_shared_x_axes().remove(axis) for axis in g.diag_axes.ravel()];
[g.axes[i, j].get_shared_y_axes().remove(axis) for axis in g.diag_axes.ravel()];
# Set limits
for i in range(n_theta):
for j in range(n_theta):
if i == j:
g.diag_axes[i].set_xlim(xmin=lims[i][0], xmax=lims[i][1]);
g.axes[i, i].set_xlim(xmin=lims[i][0], xmax=lims[i][1]);
else:
g.axes[i, j].set_xlim(xmin=lims[j][0], xmax=lims[j][1]);
g.axes[i, j].set_ylim(ymin=lims[i][0], ymax=lims[i][1]);
if theta_ref is not None:
for i in range(n_theta):
g.diag_axes[i].vlines(theta_ref[i],ymin=0,ymax=1,transform = g.diag_axes[i].get_xaxis_transform(),color='r');
for j in range(n_theta):
if i>j: # Lower diag contour plots
g.axes[i,j].scatter(theta_ref[j], theta_ref[i], marker='o', s=5, color="red");
if save is not None:
plt.tight_layout()
plt.savefig(save,dpi=300,bbox_inches='tight')
return g.fig
else:
fig,ax=plt.subplots()
ax.set_xlabel(design_names[0])
sns.histplot(theta_df.loc[:, theta_df.columns != 'idx'],kde=True,ax=ax)
if save is not None:
plt.tight_layout()
plt.savefig(save,dpi=300,bbox_inches='tight')
return fig
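# Hypothetical usage sketch (fabricated posterior draws; real inputs come
# from model.get_samples() on a fitted SepiaModel):
def _demo_theta_pairs():
    rng = np.random.default_rng(0)
    fake_samples = {'theta': rng.uniform(size=(200, 2))}
    return theta_pairs(fake_samples, design_names=['theta_1', 'theta_2'])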
def mcmc_trace(samples_dict,theta_names=None,start=0,end=None,n_to_plot=500,by_group=True,max_print=10,save=None):
"""
Create trace plot of MCMC samples.
:param dict samples_dict: samples from model.get_samples()
:param list/NoneType theta_names: list of string names for thetas, optional (None will use default names)
:param int start: where to start plotting traces (sample index)
:param int/NoneType end: where to end plotting traces (sample index)
:param int n_to_plot: how many samples to show
:param bool by_group: group params of the same name onto one axis?
:param int max_print: maximum number of traces to plot
:param str save: file name to save plot
:returns: matplotlib figure
"""
# trim samples dict
n_samples = samples_dict['lamUz'].shape[0]
if n_to_plot>n_samples:
n_to_plot = n_samples
# default end
if end is None:
end = n_samples-1
# check start is valid
if not isinstance(start,int) or start<0 :
raise TypeError('invalid start index')
# check end is valid
if end is not None and (start>end or end<0 or not isinstance(end,int) or end > n_samples):
raise TypeError('invalid end index')
# which indices to plot
if (end-start) > n_to_plot:
plot_idx = np.unique(np.linspace(start,end,n_to_plot,dtype=int))
else:
plot_idx = np.arange(start,end,1,dtype=int)
if not by_group:
total_plots = 0
for i,k in enumerate(samples_dict.keys()):
if k == 'theta_native':
continue
total_plots += min(samples_dict[k].shape[1],max_print)
fig,axs = plt.subplots(total_plots,1,sharex=True,figsize=[10,1.5*total_plots])
fig.subplots_adjust(hspace=0)
axs_idx = 0
for i, k in enumerate(samples_dict.keys()):
if k == 'theta_native':
continue
n_theta = min(samples_dict[k].shape[1],max_print)
if n_theta > 1:
for j in range(n_theta):
sns.lineplot(x=plot_idx,y=samples_dict[k][plot_idx,j], palette="tab10", linewidth=.75, ax = axs[axs_idx])
if k=='theta' and theta_names is not None: axs[axs_idx].set_ylabel(theta_names[j])
else: axs[axs_idx].set_ylabel(k+'_'+str(j+1))
axs_idx+=1
else:
sns.lineplot(x=plot_idx,y=samples_dict[k][plot_idx,0], palette="tab10", linewidth=.75, ax = axs[axs_idx])
if k=='theta' and theta_names is not None: axs.set_ylabel(theta_names[0])
else: axs[axs_idx].set_ylabel(k)
axs_idx+=1
if save is not None: plt.savefig(save,dpi=300, bbox_inches='tight')
return fig
else:
lgds = []
n_axes = len(samples_dict)-1 if 'theta_native' in samples_dict.keys() else len(samples_dict) # dont plot theta_native
fig, axs = plt.subplots(n_axes,1,sharex=True,figsize=[10,1.5*n_axes])
fig.subplots_adjust(hspace=0)
for i, k in enumerate(samples_dict.keys()):
if k == 'theta_native':
continue
n_lines = min(samples_dict[k].shape[1],max_print)
if n_lines > 1:
for j in range(n_lines):
sns.lineplot(x=plot_idx,y=samples_dict[k][plot_idx,j], palette="tab10", linewidth=.75, ax = axs[i],
label= theta_names[j] if (i==0 and theta_names is not None) else k+str(j+1))
axs[i].set_ylabel(k)
lgds.append(axs[i].legend(bbox_to_anchor=(1.025, 1), loc='upper left', borderaxespad=0., ncol=int(np.ceil(n_lines/5))))
else:
sns.lineplot(x=plot_idx,y=samples_dict[k][plot_idx,0], palette="tab10", linewidth=.75, ax = axs[i])
axs[i].set_ylabel(theta_names[0] if (i==0 and theta_names is not None) else k)
if save is not None: plt.savefig(save,dpi=300,bbox_extra_artists=lgds, bbox_inches='tight')
return fig
def param_stats(samples_dict,theta_names=None,q1=0.05,q2=0.95,digits=4):
"""
Compute statistics on the samples.
:param dict samples_dict: samples from model.get_samples()
:param list/NoneType theta_names: list of string names for thetas, optional (None will use default names)
:param float q1: lower quantile in [0, 1]
:param float q2: upper quantile in [0, 1]
:param int digits: how many digits to show in output
:return: pandas DataFrame containing statistics
"""
# theta_names : list
# samples_dict : dictionary of samples
# stats : dataframe with mean and std of all parameters
if 'theta' in samples_dict.keys():
n_theta = samples_dict['theta'].shape[1]
if theta_names is not None and len(theta_names) != n_theta:
print('theta_names should have',n_theta, 'entries')
return
mean = []
sd = []
keys = []
q1_list = []
q2_list = []
for i, k in enumerate(samples_dict.keys()):
n_param = samples_dict[k].shape[1]
for j in range(n_param):
mean.append(np.round(np.mean(samples_dict[k][:, j]),digits))
sd.append(np.round(np.std(samples_dict[k][:, j]),digits))
q1_list.append(np.round(np.quantile(samples_dict[k][:, j],q1),digits))
q2_list.append(np.round(np.quantile(samples_dict[k][:, j],q2),digits))
if i==0 and theta_names is not None: keys.append(theta_names[j])
elif n_param>1: keys.append(k+'_'+str(j+1))
else: keys.append(k)
stats = pd.DataFrame({'mean':mean,'sd':sd,'{} quantile'.format(q1):q1_list,\
'{} quantile'.format(q2):q2_list},index=keys)
return stats
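# Hypothetical usage sketch (fabricated samples dictionary; note 'theta' is
# placed first so theta_names line up with the i == 0 branch above):
def _demo_param_stats():
    rng = np.random.default_rng(1)
    fake_samples = {'theta': rng.uniform(size=(500, 2)),
                    'lamUz': rng.gamma(2.0, 1.0, size=(500, 1))}
    return param_stats(fake_samples, theta_names=['t1', 't2'])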
def rho_box_plots(model,labels=None):
"""
Show rho box plots. (Rho are the transformed betaU parameters, corresponding to GP lengthscales)
:param sepia.SepiaModel model: SepiaModel object
:param list/NoneType labels: optional labels to use for box plot
:return: matplotlib figure
"""
samples_dict = {p.name: p.mcmc_to_array() for p in model.params.mcmcList}
p = model.num.p
q = model.num.q
pu = model.num.pu
bu = samples_dict['betaU']
ru = np.exp(-bu / 4)
fig,axs = plt.subplots(nrows=pu,tight_layout=True,figsize=[5,3*pu],squeeze=False)
for i,ax in enumerate(axs.flatten()):
r = ru[:, ((p+q)*i):((p+q)*i)+(p+q)]
ax.boxplot(r)
if labels is not None: ax.set_xticklabels(labels)
ax.set_yticks(np.arange(0,1.2,.2))
ax.set_ylabel(r'$\rho$')
ax.set_title('PC {}'.format(i+1))
return fig
def plot_acf(model,nlags,nburn=0,alpha=.05,save=None):
"""
Plot autocorrelation function for all parameters theta.
:param sepia.SepiaModel model: SepiaModel object
:param int nlags: how many lags to compute/plot
:param int nburn: how many samples to burn
:param float alpha: confidence level for acf significance line (0,1)
:param str save: file name to save figure
:return: matplotlib figure
"""
if nlags>model.get_num_samples():
raise ValueError('plot_acf: must have more samples than requested lag size')
if alpha <= 0 or alpha >= 1:
raise ValueError('alpha must be in (0,1)')
if model.num.sim_only:
print('ACF needs thetas but this is a sim-only model.')
return
# get theta chains
for p in model.params.mcmcList:
if p.name == 'theta':
chain = p.mcmc_to_array(flat=True).T
acf = model.acf(chain,nlags,plot=True,alpha=alpha)
if save is not None:
acf['figure'].savefig(save,dpi=300,bbox_inches='tight')
return acf
def plot_K_basis(data, max_plots=4, obs=True):
"""
Plots K basis elements for both sim and obs indices (if applicable). Only applies to multivariate-output models.
TODO: Lamy should be 1/Sigy_std
:param SepiaData data: SepiaData object
:param int max_plots: maximum number of principal components to plot
:return: tuple containing matplotlib figure objects: (fig_sim, fig_obs) or just fig_sim if no observed data is present
"""
# Return early if scalar out or basis not set up
if data.scalar_out:
print('Scalar output, no K basis to plot.')
return
if data.sim_data.K is None:
print('K basis not set up, call create_K_basis() first.')
return
# Plot sim basis
pu = data.sim_data.K.shape[0]
ncol = 5
nrow = int(np.ceil((min(pu, max_plots) + 1) / ncol)) # add 1 for mean line
fig_sim, axs_sim = plt.subplots(nrow, ncol, figsize=(12, 2 * nrow))
fig_sim.tight_layout()
for i, ax in enumerate(axs_sim.flatten()):
if i == 0: # plot mean line
ax.plot(data.sim_data.y_ind, np.mean(data.sim_data.K,axis=0))
ax.set_title('sim mean')
ax.set_ylabel('sim K basis')
ax.set_xlabel('sim y_ind')
elif i < pu+1:
ax.plot(data.sim_data.y_ind, data.sim_data.K.T[:,i-1])
ax.set_title('PC %d' % (i))
ax.set_xlabel('sim y_ind')
else:
ax.axis('off')
# If obs are present and requested, plot obs basis
if not data.sim_only and obs:
if data.ragged_obs:
pu = np.array([k.shape[0] for k in data.obs_data.K])
if np.all(pu == pu[0]): pu = pu[0]
else: raise ValueError('first dimension in lists not equal')
else:
pu = data.obs_data.K.shape[0]
ncol = 5
nrow = int(np.ceil((min(pu,max_plots) + 1) / ncol)) # add 1 for mean line
fig_obs, axs_obs = plt.subplots(nrow,ncol,figsize=(12, 2 * nrow))
fig_obs.tight_layout()
for i,ax in enumerate(axs_obs.flatten()):
if i == 0: # plot mean line
if data.ragged_obs: ax.plot(data.obs_data.y_ind[i],np.mean(data.obs_data.K[i],axis=0))
else: ax.plot(data.obs_data.y_ind, np.mean(data.obs_data.K,axis=0))
ax.set_title('obs mean')
ax.set_ylabel('obs K basis')
ax.set_xlabel('obs y_ind')
elif i < pu+1:
if data.ragged_obs: ax.plot(data.obs_data.y_ind[i],data.obs_data.K[i].T[:,i-1])
else: ax.plot(data.obs_data.y_ind, data.obs_data.K.T[:,i-1])
ax.set_title('PC %d' % (i))
ax.set_xlabel('obs y_ind')
else:
ax.axis('off')
return(fig_sim,fig_obs)
else:
return fig_sim
def plot_K_weights(data, max_u_plot=5):
"""
Plots K basis weights for both sim and obs data (if applicable). Only applies to multivariate-output models.
TODO: Lamy should be 1/Sigy_std
:param SepiaData data: SepiaData object
:param int max_u_plot: max number of u's for which to plot vertical line over histogram of w's
:return: tuple containing matplotlib figure objects: (fig_uw, fig_v) or just fig_uw if no discrepancy is specified
"""
# Return early if scalar out or basis not set up
if data.scalar_out:
print('Scalar output, no K weights to plot.')
return
if data.sim_data.K is None:
print('K basis not set up, call create_K_basis() first.')
return
# Compute sim K weights
pu = data.sim_data.K.shape[0]
ncol = 5
nrow = int(np.ceil(pu / ncol))
w = np.dot(np.linalg.pinv(data.sim_data.K).T, data.sim_data.y_std.T).T
fig_uw, axs_uw = plt.subplots(nrow,ncol,figsize=(10,2*nrow))
fig_uw.tight_layout()
# Compute obs K weights if obs are present
if not data.sim_only and data.obs_data.K is not None:
# set pu
if data.ragged_obs:
pu = np.array([k.shape[0] for k in data.obs_data.K])
if np.all(pu == pu[0]): pu = pu[0]
else: raise ValueError('first dimension in lists not equal')
else:
pu = data.obs_data.K.shape[0]
# No D
if data.obs_data.D is None:
pv = 0
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
# compute u
if data.ragged_obs:
u = []
for i in range(len(data.obs_data.y_ind)):
DK = data.obs_data.K[i]
Lamy = np.eye(data.obs_data.y_ind[i].shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T])
u.append(np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std[i].T])).T)
u = np.array(u)
else:
DK = data.obs_data.K
                Lamy = np.eye(data.obs_data.y_ind.shape[0])  # identity of size len(y_ind); the ragged case is handled in the branch above
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
u = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std.T])).T
nrow = int(np.ceil(pu / ncol))
if u.shape[1] == w.shape[1]:
for i,ax in enumerate(axs_uw.flatten()):
if i < w.shape[1]:
limit = abs(max(max(w[:,i].min(), w[:,i].max(), key=abs),\
max(u[:,i].min(), u[:,i].max(), key=abs), key=abs))
ax.set_xlim([-1.25*limit,1.25*limit])
bins_uw = np.linspace(-limit,limit,15,endpoint=True)
ax.set_xlabel('PC %d wt' % (i+1))
ax.set_xlim([-limit,limit])
ax.hist(w[:,i],bins=bins_uw,label='w',density=True)
for j in range(min(u.shape[0],max_u_plot)):
ax.axvline(u[j,i],color='darkorange',label='u' if j==0 else '_')
ax.legend(prop={'size': 6})
else:
ax.axis('off')
return fig_uw
else: # do u and w independently
raise ValueError('u.shape[1] != w.shape[1]')
else: # D
if data.ragged_obs:
pv = np.array([d.shape[0] for d in data.obs_data.D])
if np.all(pv == pv[0]): pv = pv[0]
else: raise ValueError('first dimension in lists not equal')
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
u = []
v = []
for i in range(len(data.obs_data.D)):
DK = np.concatenate([data.obs_data.D[i], data.obs_data.K[i]])
Lamy = np.eye(data.obs_data.y_ind[i].shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std[i].T]))
v.append(vu[:pv].T)
u.append(vu[pv:].T)
u = np.array(u)
v = np.array(v)
else:
pv = data.obs_data.D.shape[0]
DK = np.concatenate([data.obs_data.D, data.obs_data.K]) # (pu+pv, ell_obs)
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
Lamy = np.eye(data.obs_data.y_ind.shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std.T]))
v = vu[:pv, :].T
u = vu[pv:, :].T
if u.shape[1] == w.shape[1]:
for i,ax in enumerate(axs_uw.flatten()):
if i < w.shape[1]:
limit = abs(max(max(w[:,i].min(), w[:,i].max(), key=abs),\
max(u[:,i].min(), u[:,i].max(), key=abs), key=abs))
ax.set_xlim([-1.1*limit,1.1*limit])
bins_uw = np.linspace(-limit,limit,15,endpoint=True)
ax.set_xlabel('PC %d wt' % (i+1))
ax.hist(w[:,i],bins=bins_uw,label='w',density=True)
for j in range(min(u.shape[0],max_u_plot)):
ax.axvline(u[j,i],color='darkorange',label='u' if j==0 else '_')
ax.legend(prop={'size': 6})
else:
ax.axis('off')
else:
raise ValueError('u.shape[1] != w.shape[1]')
# V
nrow = int(np.ceil(pv / ncol))
fig_v, axs_v = plt.subplots(nrow,ncol,figsize=(10,2*nrow))
fig_v.tight_layout()
for i,ax in enumerate(axs_v.flatten()):
if i < v.shape[1]:
ax.hist(v[:,i],density=True)
ax.set_xlabel('D %d wt : v' % (i+1))
else:
ax.axis('off')
return (fig_uw, fig_v)
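# Math note (illustration): the simulation weights computed above are the
# least-squares coefficients of y_std in the K basis, i.e. the w minimizing
# ||y_std - w K||_F, equivalently w = y_std @ pinv(K).
def _basis_weights(y_std, K):
    # y_std: (m, ell) standardized outputs, K: (pu, ell) basis -> (m, pu)
    return np.dot(np.linalg.pinv(K).T, y_std.T).T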
def plot_u_w_pairs(data, max_plots=5, save=False):
"""
Plots principal component basis weights for both sim and obs data (if applicable). Only applies to multivariate-output models.
:param SepiaData data: SepiaData object
:param int max_plots: max number of principal components to plot
:return: matplotlib figure fig_g: seaborn pairs figure
"""
# Return early if scalar out or basis not set up
if data.scalar_out:
print('Scalar output, no K weights to plot.')
return
if data.sim_data.K is None:
print('K basis not set up, call create_K_basis() first.')
return
pu = data.sim_data.K.shape[0]
w = np.dot(np.linalg.pinv(data.sim_data.K).T, data.sim_data.y_std.T).T
if not data.sim_only and data.obs_data.K is not None:
if data.ragged_obs:
pu = np.array([k.shape[0] for k in data.obs_data.K])
if np.all(pu == pu[0]): pu = pu[0]
else: raise ValueError('first dimension in lists not equal')
else:
pu = data.obs_data.K.shape[0]
# No D
if data.obs_data.D is None:
pv = 0
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
if data.ragged_obs:
u = []
for i in range(len(data.obs_data.K)):
DK = data.obs_data.K[i]
Lamy = np.eye(data.obs_data.y_ind[i].shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
u.append(np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std[i].T])).T)
u = np.array(u)
else:
DK = data.obs_data.K
Lamy = np.eye(data.obs_data.y_ind.shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
u = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std.T])).T
else: # D
if data.ragged_obs:
pv = np.array([d.shape[0] for d in data.obs_data.D])
if np.all(pv == pv[0]): pv = pv[0]
else: raise ValueError('first dimension in lists not equal')
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
u = []
v = []
for i in range(len(data.obs_data.D)):
DK = np.concatenate([data.obs_data.D[i], data.obs_data.K[i]])
Lamy = np.eye(data.obs_data.y_ind[i].shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std[i].T]))
v.append(vu[:pv].T)
u.append(vu[pv:].T)
u = np.array(u)
v = np.array(v)
else:
pv = data.obs_data.D.shape[0]
DK = np.concatenate([data.obs_data.D, data.obs_data.K]) # (pu+pv, ell_obs)
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
Lamy = np.eye(data.obs_data.y_ind.shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std.T]))
v = vu[:pv, :].T
u = vu[pv:, :].T
# change u,w to match max_plots
if w.shape[1]>max_plots:
w = w[:,0:max_plots]
print('Plotting up to',max_plots,'pairs. Change with parameter \'max_plots\'')
col_names = []
for i in range(w.shape[1]): col_names.append('w{}'.format(i+1))
w_df = pd.DataFrame(data=w,columns=col_names)
if u.shape[1]>max_plots: u = u[:,0:max_plots]
lims = max(np.maximum(np.max(np.abs(w),axis=0),np.max(np.abs(u),axis=0))*1.1)
with sns.plotting_context("notebook", font_scale=1):
g = sns.PairGrid(w_df)
g.map_diag(sns.histplot, kde=True)
g.map_offdiag(sns.scatterplot)
for i in range(g.axes.shape[1]): # rows
for j in range(g.axes.shape[0]): # columns
g.axes[i,j].set_xlim(-lims,lims); g.axes[i,j].set_ylim(-lims,lims)
if i == j:
for k in range(u.shape[0]):
g.axes[i,i].axvline(u[k,i],color='darkorange',label='u{}'.format(i+1) if k==0 else "_")
g.axes[i,i].legend(facecolor='white')
else:
g.axes[i,j].scatter(u[:,j],u[:,i],c='darkorange',label='(u{},u{})'.format(j+1,i+1))
g.axes[i,j].legend(facecolor='white')
if save: plt.savefig(save,dpi=300)
return g.fig
def plot_K_residuals(data):
"""
Plots residuals after projection to K basis. Only applies to multivariate-output models.
:param SepiaData data: SepiaData object
:return: tuple containing matplotlib figure objects: (fig_u, fig_v) or just fig_noD if no discrepancy is specified
"""
# Return early if scalar out or basis not set up
if data.scalar_out:
print('Scalar output, no K residuals to plot.')
return
if data.sim_data.K is None:
print('K basis not set up, call create_K_basis() first.')
return
if not data.sim_only and data.obs_data.K is not None:
if isinstance(data.obs_data.K, list):
print('plot_K_residuals cannot yet handle ragged observations')
return
pu = data.obs_data.K.shape[0]
if data.obs_data.D is None:
pv = 0
DK = data.obs_data.K
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
Lamy = np.eye(data.obs_data.y_ind.shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
u = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std.T])).T
proj = np.dot(u, DK)
resid = data.obs_data.y_std - proj
fig_noD, axs_noD = plt.subplots(1,3,figsize=(4,6))
axs_noD[0].plot(data.obs_data.y_ind, data.obs_data.y_std.squeeze().T)
axs_noD[0].set_title('obs y_std')
axs_noD[0].set_xlabel('obs y_ind')
axs_noD[1].plot(data.obs_data.y_ind, proj.squeeze().T)
axs_noD[1].set_title('obs projection reconstruction')
axs_noD[1].set_xlabel('obs y_ind')
axs_noD[2].plot(data.obs_data.y_ind, resid.squeeze().T, '-')
axs_noD[2].set_title('obs projection residual')
axs_noD[2].set_xlabel('obs y_ind')
return fig_noD
else:
pv = data.obs_data.D.shape[0]
DK = np.concatenate([data.obs_data.D, data.obs_data.K]) # (pu+pv, ell_obs)
DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)
Lamy = np.eye(data.obs_data.y_ind.shape[0])
DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)
vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, data.obs_data.y_std.T]))
v = vu[:pv, :].T
u = vu[pv:, :].T
ncol = 5
nrow = int(np.ceil(pu / ncol))
fig_u,axs_u = plt.subplots(nrow,ncol,figsize=(8, 2 * nrow))
for i, ax in enumerate(axs_u.flatten()):
if i < pu:
ax.hist(u[:, i])
ax.set_xlabel('PC %d wt' % (i+1))
else:
ax.axis('off')
nrow = int(np.ceil(pv / ncol))
fig_v,axs_v = plt.subplots(nrow,ncol,figsize=(8, 2 * nrow))
for i,ax in enumerate(axs_v.flatten()):
if i < pv:
ax.hist(v[:, i])
ax.set_xlabel('D %d wt' % (i+1))
else:
ax.axis('off')
return (fig_u,fig_v)
def plot_data(data,which_x=None,x_min=None,x_max=None,y_min=None,y_max=None,n_neighbors=3,max_sims=50,save=None):
"""
Plots observed data and simulation runs on the same axis with n_neighbors nearest simulations
in x-space. Only applies to multivariate-output models with both simulation and observed data.
:param SepiaData data: SepiaData object
:param list/NoneType which_x: optionally sets which x_obs indices to plot
:param float x_min: sets x lower limit on plot
:param float x_max: sets x upper limit on plot
:param float y_min: sets y lower limit on plot
:param float y_max: sets y upper limit on plot
:param int n_neighbors: sets number of nearest simulations to highlight
:param int max_sims: sets maximum number of simulation runs to plot
    :return: matplotlib figure fig: figure object of plot
"""
if data.sim_only:
print('plot_data does not currently work for sim_only models.')
return
if data.scalar_out:
print('plot_data does not currently work for univariate output models.')
return
n = data.obs_data.x.shape[0]
m = data.sim_data.x.shape[0]
# plot up to 4 input space points
if n > 4:
        # if no which_x given, or the given which_x is out of bounds
        if which_x is None or not (np.all(np.array(which_x) < n) and np.all(np.array(which_x) > -1)):
            # choose 4 equally spaced input points to plot
            which_x = np.linspace(0, n-1, 4, dtype=int)
x_plot = data.obs_data.x[which_x,:]
else:
which_x = np.arange(0,n,1,dtype=int)
x_plot = data.obs_data.x
n_plots = x_plot.shape[0]
# get axis limits
if data.ragged_obs:
if x_min is None: x_min = min(min([np.amin(k) for k in data.obs_data.y_ind]),np.amin(data.sim_data.y_ind))
        if x_max is None: x_max = max(max([np.amax(k) for k in data.obs_data.y_ind]), np.amax(data.sim_data.y_ind))
import numpy as np
from bisect import bisect_left
def overlap(min1, max1, min2, max2):
return max(0, min(max1, max2) - max(min1, min2))
def overlap_1d(x):
return max(0, min(x[1], x[3]) - max(x[0], x[2]))
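# Illustrative check (values made up): intervals [0, 5] and [3, 8] share [3, 5],
# so both helpers should report an overlap length of 2:
# overlap(0, 5, 3, 8) == 2
# overlap_1d([0, 5, 3, 8]) == 2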
def shifted_modified_hy_estimator(x, y, t_x, t_y, k, normalize=False): # contrast function
# print('Common Python.')
hy_cov = 0.0
if normalize:
norm_x = np.sqrt(np.sum(np.square(np.diff(x[t_x]))))
norm_y = np.sqrt(np.sum(np.square(np.diff(y[t_y]))))
else:
norm_x = 1.0
norm_y = 1.0
    clipped_t_y_minus_k = np.clip(t_y - k, np.min(t_y), np.max(t_y))
from __future__ import print_function
import threading
import multiprocessing
from collections import namedtuple
import os
try:
from queue import Queue, Full, Empty
except ImportError:
from Queue import Queue, Full, Empty
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import h5py
import tensorflow as tf
from pybh import math_utils, hdf5_utils
def write_samples_to_hdf5_file(filename, samples, attr_dict=None, **dataset_kwargs):
stacked_samples = {}
for key in samples[0]:
stacked_samples[key] = np.empty((len(samples),) + samples[0][key].shape, dtype=samples[0][key].dtype)
for key in samples[0]:
for i, sample in enumerate(samples):
stacked_samples[key][i, ...] = sample[key]
hdf5_utils.write_numpy_dict_to_hdf5_file(filename, stacked_samples, attr_dict=attr_dict,
**dataset_kwargs)
def read_samples_from_hdf5_file(filename, field_dict=None, read_attributes=True):
result = hdf5_utils.read_hdf5_file_to_numpy_dict(filename, field_dict, read_attributes)
if read_attributes:
data, attr_dict = result
else:
data = result
key_list = list(data.keys())
sample_dict = {}
for key in key_list:
assert(data[key].shape[0] == data[key_list[0]].shape[0])
sample_dict[key] = []
for i in range(data[key].shape[0]):
sample_dict[key].append(data[key][i, ...])
samples = []
for i in range(data[key_list[0]].shape[0]):
samples.append({key: sample_dict[key][i] for key in sample_dict})
if read_attributes:
return samples, attr_dict
else:
return samples
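# Hypothetical round trip (file path and shapes are illustrative only):
# samples = [{"grid": np.zeros((4, 4), dtype=np.float32)} for _ in range(3)]
# write_samples_to_hdf5_file("/tmp/samples.hdf5", samples)
# restored = read_samples_from_hdf5_file("/tmp/samples.hdf5", read_attributes=False)
# assert len(restored) == 3 and restored[0]["grid"].shape == (4, 4)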
Record = namedtuple("Record", ["obs_levels", "grid_3d", "rewards", "prob_rewards", "scores"])
RecordBatch = namedtuple("RecordBatch", ["obs_levels", "grid_3ds", "rewards", "prob_rewards", "scores"])
RecordV2 = namedtuple("RecordV2", ["obs_levels", "grid_3d", "rewards", "norm_rewards",
"prob_rewards", "norm_prob_rewards", "scores"])
RecordV2Batch = namedtuple("RecordV2Batch", ["obs_levels", "grid_3ds", "rewards", "norm_rewards",
"prob_rewards", "norm_prob_rewards", "scores"])
RecordV3 = namedtuple("RecordV3", ["obs_levels", "in_grid_3d", "out_grid_3d", "rewards", "scores"])
RecordV3Batch = namedtuple("RecordV3Batch", ["obs_levels", "in_grid_3ds", "out_grid_3ds", "rewards", "scores"])
RecordV4 = namedtuple("RecordV4", ["intrinsics", "map_resolution", "axis_mode", "forward_factor",
"obs_levels", "in_grid_3d", "out_grid_3d", "rewards", "scores",
"rgb_image", "depth_image", "normal_image"])
RecordV4Batch = namedtuple("RecordV4Batch", ["intrinsics", "map_resolution", "axis_mode", "forward_factor",
"obs_levels", "in_grid_3ds", "out_grid_3ds", "rewards", "scores",
"rgb_images", "depth_images", "normal_images"])
def write_hdf5_records(filename, records):
f = h5py.File(filename, "w")
(obs_levels, grid_3d, rewards, prob_rewards, scores) = records[0]
assert(grid_3d.shape[-1] == 2 * len(obs_levels))
rewards_shape = (len(records),) + rewards.shape
rewards_dset = f.create_dataset("rewards", rewards_shape, dtype='f')
prob_rewards_shape = (len(records),) + prob_rewards.shape
prob_rewards_dset = f.create_dataset("prob_rewards", prob_rewards_shape, dtype='f')
scores_shape = (len(records),) + scores.shape
scores_dset = f.create_dataset("scores", scores_shape, dtype='f')
grid_3ds_shape = (len(records),) + grid_3d.shape
grid_3ds_dset = f.create_dataset("grid_3ds", grid_3ds_shape, dtype='f')
grid_3ds_dset.attrs["obs_levels"] = obs_levels
grid_3ds_dset.attrs["obs_channels"] = grid_3ds_shape[-1] / len(obs_levels)
for i, record in enumerate(records):
(obs_levels, grid_3d, rewards, prob_rewards, scores) = record
rewards_dset[i, ...] = rewards
prob_rewards_dset[i, ...] = prob_rewards
scores_dset[i, ...] = scores
grid_3ds_dset[i, ...] = grid_3d
f.close()
def read_hdf5_records(filename):
try:
f = h5py.File(filename, "r")
obs_levels = f["grid_3ds"].attrs["obs_levels"]
obs_channels = f["grid_3ds"].attrs["obs_channels"]
rewards = np.array(f["rewards"])
prob_rewards = np.array(f["prob_rewards"])
scores = np.array(f["scores"])
grid_3ds = np.array(f["grid_3ds"])
assert(rewards.shape[0] == grid_3ds.shape[0])
assert(prob_rewards.shape[0] == grid_3ds.shape[0])
assert(scores.shape[0] == grid_3ds.shape[0])
assert(grid_3ds.shape[-1] == len(obs_levels) * obs_channels)
return RecordBatch(obs_levels, grid_3ds, rewards, prob_rewards, scores)
except Exception as err:
print("ERROR: Exception raised when reading as HDF5 v1 file \"{}\": {}".format(filename, err))
def write_hdf5_records_v2(filename, records):
f = h5py.File(filename, "w")
(obs_levels, grid_3d, rewards, norm_rewards, prob_rewards, norm_prob_rewards, scores) = records[0]
assert(grid_3d.shape[-1] == 2 * len(obs_levels))
rewards_shape = (len(records),) + rewards.shape
rewards_dset = f.create_dataset("rewards", rewards_shape, dtype='f')
norm_rewards_shape = (len(records),) + norm_rewards.shape
norm_rewards_dset = f.create_dataset("norm_rewards", norm_rewards_shape, dtype='f')
prob_rewards_shape = (len(records),) + prob_rewards.shape
prob_rewards_dset = f.create_dataset("prob_rewards", prob_rewards_shape, dtype='f')
norm_prob_rewards_shape = (len(records),) + norm_prob_rewards.shape
norm_prob_rewards_dset = f.create_dataset("norm_prob_rewards", norm_prob_rewards_shape, dtype='f')
scores_shape = (len(records),) + scores.shape
scores_dset = f.create_dataset("scores", scores_shape, dtype='f')
grid_3ds_shape = (len(records),) + grid_3d.shape
grid_3ds_dset = f.create_dataset("grid_3ds", grid_3ds_shape, dtype='f')
grid_3ds_dset.attrs["obs_levels"] = obs_levels
grid_3ds_dset.attrs["obs_channels"] = grid_3ds_shape[-1] / len(obs_levels)
for i, record in enumerate(records):
(obs_levels, grid_3d, rewards, norm_rewards, prob_rewards, norm_prob_rewards, scores) = record
rewards_dset[i, ...] = rewards
norm_rewards_dset[i, ...] = norm_rewards
prob_rewards_dset[i, ...] = prob_rewards
norm_prob_rewards_dset[i, ...] = norm_prob_rewards
scores_dset[i, ...] = scores
grid_3ds_dset[i, ...] = grid_3d
f.close()
def write_hdf5_records_v3(filename, records):
f = h5py.File(filename, "w")
(obs_levels, in_grid_3d, out_grid_3d, rewards, scores) = records[0]
assert(in_grid_3d.shape[-1] == 2 * len(obs_levels))
assert(np.all(in_grid_3d.shape == out_grid_3d.shape))
rewards_shape = (len(records),) + rewards.shape
rewards_dset = f.create_dataset("rewards", rewards_shape, dtype='f')
scores_shape = (len(records),) + scores.shape
scores_dset = f.create_dataset("scores", scores_shape, dtype='f')
in_grid_3ds_shape = (len(records),) + in_grid_3d.shape
in_grid_3ds_dset = f.create_dataset("in_grid_3ds", in_grid_3ds_shape, dtype='f')
out_grid_3ds_shape = (len(records),) + out_grid_3d.shape
out_grid_3ds_dset = f.create_dataset("out_grid_3ds", out_grid_3ds_shape, dtype='f')
f.attrs["obs_levels"] = obs_levels
f.attrs["obs_channels"] = out_grid_3ds_shape[-1] / len(obs_levels)
for i, record in enumerate(records):
(obs_levels, in_grid_3d, out_grid_3d, rewards, scores) = record
in_grid_3ds_dset[i, ...] = in_grid_3d
out_grid_3ds_dset[i, ...] = out_grid_3d
rewards_dset[i, ...] = rewards
scores_dset[i, ...] = scores
f.close()
def create_dataset_with_fixed_chunks(f, name, shape, dtype=None, **kwargs):
return f.create_dataset(name, shape, dtype=dtype, **kwargs)
# record_size = 4 * reduce(int.__mul__, shape[1:])
# chunk_records = 1024 * 1024 / record_size
# chunk_records = min(shape[0], chunk_records)
# if chunk_records == 0:
# chunk_records = 1
# chunks = (chunk_records,) + shape[1:]
# # print("chunks:", chunks)
# maxshape = shape
# return f.create_dataset(name, shape, dtype=dtype, chunks=chunks, maxshape=maxshape, **kwargs)
def write_hdf5_records_v4(filename, records, dataset_kwargs):
f = h5py.File(filename, "w")
rec0 = records[0]
assert(rec0.in_grid_3d.shape[-1] == 2 * len(rec0.obs_levels))
assert(np.all(rec0.in_grid_3d.shape == rec0.out_grid_3d.shape))
rewards_shape = (len(records),) + rec0.rewards.shape
rewards_dset = create_dataset_with_fixed_chunks(f, "rewards", rewards_shape, dtype=np.float32, **dataset_kwargs)
scores_shape = (len(records),) + rec0.scores.shape
scores_dset = create_dataset_with_fixed_chunks(f, "scores", scores_shape, dtype=np.float32, **dataset_kwargs)
in_grid_3ds_shape = (len(records),) + rec0.in_grid_3d.shape
in_grid_3ds_dset = create_dataset_with_fixed_chunks(f, "in_grid_3ds", in_grid_3ds_shape, dtype=np.float32, **dataset_kwargs)
out_grid_3ds_shape = (len(records),) + rec0.out_grid_3d.shape
out_grid_3ds_dset = create_dataset_with_fixed_chunks(f, "out_grid_3ds", out_grid_3ds_shape, dtype=np.float32, **dataset_kwargs)
rgb_image_shape = (len(records),) + rec0.rgb_image.shape
rgb_image_dset = create_dataset_with_fixed_chunks(f, "rgb_images", rgb_image_shape, dtype=np.uint8, **dataset_kwargs)
depth_image_shape = (len(records),) + rec0.depth_image.shape
depth_image_dset = create_dataset_with_fixed_chunks(f, "depth_images", depth_image_shape, dtype=np.float32, **dataset_kwargs)
normal_image_shape = (len(records),) + rec0.normal_image.shape
normal_image_dset = create_dataset_with_fixed_chunks(f, "normal_images", normal_image_shape, dtype=np.float32, **dataset_kwargs)
f.attrs["obs_levels"] = rec0.obs_levels
f.attrs["obs_channels"] = out_grid_3ds_shape[-1] / len(rec0.obs_levels)
f.attrs["intrinsics"] = rec0.intrinsics
f.attrs["map_resolution"] = rec0.map_resolution
f.attrs["axis_mode"] = rec0.axis_mode
f.attrs["forward_factor"] = rec0.forward_factor
for i, record in enumerate(records):
assert(np.all(record.intrinsics == rec0.intrinsics))
assert(record.map_resolution == rec0.map_resolution)
assert(record.axis_mode == rec0.axis_mode)
assert(record.forward_factor == rec0.forward_factor)
assert(np.all(record.obs_levels == rec0.obs_levels))
assert(np.all(record.in_grid_3d.shape == rec0.in_grid_3d.shape))
assert(np.all(record.out_grid_3d.shape == rec0.out_grid_3d.shape))
in_grid_3ds_dset[i, ...] = record.in_grid_3d
out_grid_3ds_dset[i, ...] = record.out_grid_3d
rewards_dset[i, ...] = record.rewards
scores_dset[i, ...] = record.scores
rgb_image_dset[i, ...] = record.rgb_image
depth_image_dset[i, ...] = record.depth_image
normal_image_dset[i, ...] = record.normal_image
f.close()
def read_hdf5_records_v2(filename):
try:
f = h5py.File(filename, "r")
obs_levels = np.array(f["grid_3ds"].attrs["obs_levels"])
obs_channels = np.array(f["grid_3ds"].attrs["obs_channels"])
rewards = np.array(f["rewards"])
norm_rewards = np.array(f["norm_rewards"])
prob_rewards = np.array(f["prob_rewards"])
norm_prob_rewards = np.array(f["norm_prob_rewards"])
scores = np.array(f["scores"])
        grid_3ds = np.array(f["grid_3ds"])
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 16:16:17 2017
@author: Vicky
"""
import numpy as np
from sklearn.cluster import KMeans
#Student Information
print('UBitName = vvasanth')
print('personNumber = 50248708')
print('UBitName = rajvinod')
print('personNumber = 50247214')
print('UBitName = ss623')
print('personNumber = 50247317')
#importing the letor dataset
letor_input_data = np.genfromtxt('datafiles/Querylevelnorm_X.csv', delimiter=',')
letor_output_data = np.genfromtxt('datafiles/Querylevelnorm_t.csv', delimiter=',').reshape([-1, 1])
#partitioning the data set into training, validation and test sets
letor_input_data_columns=letor_input_data.shape[1]
train_rows=round(letor_input_data.shape[0]*0.8)
test_validation_rows=round(letor_input_data.shape[0]*0.1)
train_x=letor_input_data[:train_rows+1,:letor_input_data_columns]
validation_x=letor_input_data[train_rows+1:train_rows+1+test_validation_rows,:letor_input_data_columns]
test_x=letor_input_data[train_rows+1+test_validation_rows:,:letor_input_data_columns]
train_y=letor_output_data[:train_rows+1,:letor_input_data_columns]
validation_y=letor_output_data[train_rows+1:train_rows+1+test_validation_rows,:letor_input_data_columns]
test_y=letor_output_data[train_rows+1+test_validation_rows:,:letor_input_data_columns]
#applying k-means on the letor dataset
kmeans=KMeans(n_clusters=4, random_state=0).fit(train_x)
cluster_0=[]
cluster_1=[]
cluster_2=[]
cluster_3=[]
#design matrix function
def compute_design_matrix(X, centers, spreads):
# use broadcast
basis_func_outputs = np.exp(
np.sum(
np.matmul(X - centers, spreads) * (X - centers),
axis=2
) / (-2)
).T
# insert ones to the 1st col
return np.insert(basis_func_outputs, 0, 1, axis=1)
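# Shape sketch for compute_design_matrix (dimensions hypothetical): with X of
# shape (N, D), centers of shape (M, 1, D) and spreads of shape (M, D, D),
# broadcasting gives an (M, N) array of Gaussian basis outputs, which is
# transposed to (N, M) and prepended with a bias column of ones -> (N, M+1).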
# assign each training point to its cluster list (index the lists directly
# instead of building names with eval)
clusters = [cluster_0, cluster_1, cluster_2, cluster_3]
for i in range(0, train_x.shape[0]):
    clusters[kmeans.labels_[i]].append(train_x[i])
#calculating the spreads and centers
cov_cluster_0=np.linalg.pinv(np.cov(np.array(cluster_0).T))
cov_cluster_1=np.linalg.pinv(np.cov(np.array(cluster_1).T))
cov_cluster_2=np.linalg.pinv(np.cov(np.array(cluster_2).T))
cov_cluster_3=np.linalg.pinv(np.cov(np.array(cluster_3).T))
centers=kmeans.cluster_centers_
centers=centers[:, np.newaxis, :]
spreads = np.array([cov_cluster_0, cov_cluster_1, cov_cluster_2, cov_cluster_3])
#
# Evaluate trained models
#
import warnings
warnings.simplefilter(action='ignore')
from keras import backend as K
from src.model1 import uResNet34
from src.train import get_train_test_split, get_norm_dict
from src.gen1 import SliceIterator, primary_transform
from src.const import (
model_dir, model_input_size,
wells, ilines, xlines, nsamples, dt,
slices_dir, crossval_dict
)
import numpy as np
import cv2
from itertools import chain
import matplotlib.pyplot as plt
from typing import List
from pathlib import Path
model_class = uResNet34
# model weights
weights = {
'Gamma_Ray': [
'uResNet34.Gamma_Ray.sz480x512.smtd_0.14-0.78.hdf5',
'uResNet34.Gamma_Ray.sz480x512.smtd_1.11-0.37.hdf5',
'uResNet34.Gamma_Ray.sz480x512.smtd_2.07-0.65.hdf5',
'uResNet34.Gamma_Ray.sz480x512.smtd_3.42-0.67.hdf5'
],
}
def predict_on_fold(slice_list: List[Path], carotage: str, model_weights: Path, verbose: bool = False) -> dict:
"""predict model for a single fold
return: dict[slice/well]{'seism', 'mask', 'y_true', 'y_pred', 'corr'}"""
norm_dict = get_norm_dict()
norm = [(norm_dict[c]['mean'], norm_dict[c]['std']) for c in ['seismic', carotage]]
K.clear_session()
model = model_class(input_size=model_input_size, weights=model_weights, n_carotage=1)
gen = SliceIterator(slice_list, [carotage], model_input_size, transform_fun=primary_transform, norm=norm, aug=False,
batch_size=10, shuffle=False, seed=None, verbose=False, output_ids=True, infinite_loop=False)
x_m, y, ids = zip(*gen)
x, m = zip(*x_m)
x = np.concatenate(x)
m = np.concatenate(m)
y = np.concatenate(y)
ids = list(chain(*ids))
pred = model.predict([x, m], batch_size=1)
data = {}
for seismic, mask, y_true, p, i_d in zip(x, m, y, pred, ids):
designation_size = (max(ilines) - min(ilines) + 1, nsamples) if 'xline' in i_d \
else (max(xlines) - min(xlines) + 1, nsamples)
y_pred = p[..., :1]
seism = cv2.resize(seismic[..., 0], dsize=designation_size, interpolation=cv2.INTER_CUBIC)
mask = cv2.resize(mask[..., 0], dsize=designation_size, interpolation=cv2.INTER_NEAREST)
y_true = cv2.resize(y_true[..., 0], dsize=designation_size, interpolation=cv2.INTER_NEAREST)
y_pred = cv2.resize(y_pred[..., 0], dsize=designation_size, interpolation=cv2.INTER_NEAREST)
corr = np.corrcoef(y_true[mask.astype(bool)], y_pred[mask.astype(bool)])[0, 1]
data[i_d] = {'seism': seism, 'mask': mask, 'y_true': y_true, 'y_pred': y_pred, 'corr': corr}
if verbose:
# provisional correlation based on single pixels. not used in final evaluation
        print(f'corr={np.mean([d["corr"] for d in data.values() if ~np.isnan(d["corr"])])}')
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest import TestCase
import numpy as np
import runtime.testing as testing
from runtime.db import (buffered_db_writer, connect_with_data_source,
db_generator, get_table_schema, limit_select,
read_feature, read_features_from_row,
selected_columns_and_types)
from runtime.dbapi import connect
from runtime.dbapi.mysql import MYSQL_FIELD_TYPE_DICT
def execute(conn, statement):
rs = conn.query(statement)
field_names = [c[0] for c in rs.column_info()]
rows = [r for r in rs]
return field_names, rows
class TestDB(TestCase):
create_statement = "create table test_db (features text, label int)"
hive_create_statement = 'create table test_db (features string, ' \
'label int) ROW FORMAT DELIMITED FIELDS ' \
'TERMINATED BY "\001"'
select_statement = "select * from test_db"
drop_statement = "drop table if exists test_db"
@unittest.skipUnless(testing.get_driver() == "mysql",
"skip non mysql tests")
def test_mysql(self):
conn = connect(testing.get_datasource())
self._do_test(conn)
conn.close()
@unittest.skipUnless(testing.get_driver() == "hive", "skip non hive tests")
def test_hive(self):
uri = testing.get_datasource()
conn = connect(uri)
self._do_test(conn)
self._do_test_hive_specified_db(conn)
def _do_test_hive_specified_db(self, conn):
create_db = '''create database if not exists test_db'''
create_tbl = '''create table test_db.tbl (features string, label int)
ROW FORMAT DELIMITED FIELDS TERMINATED BY "\001"'''
drop_tbl = '''drop table if exists test_db.tbl'''
select_tbl = '''select * from test_db.tbl'''
table_schema = ["label", "features"]
values = [(1, '5,6,1,2')] * 10
self.assertTrue(conn.execute(create_db))
self.assertTrue(conn.execute(drop_tbl))
self.assertTrue(conn.execute(create_tbl))
with buffered_db_writer(conn,
"test_db.tbl",
table_schema,
buff_size=10) as w:
for row in values:
w.write(row)
field_names, data = execute(conn, select_tbl)
expect_result = [('5,6,1,2', 1)] * 10
self.assertEqual(field_names, ['features', 'label'])
self.assertEqual(expect_result, data)
def _do_test(self, conn):
table_name = "test_db"
table_schema = ["features", "label"]
values = [('5,6,1,2', 1)] * 10
conn.execute(self.drop_statement)
if conn.driver == "hive":
conn.execute(self.hive_create_statement)
else:
conn.execute(self.create_statement)
with buffered_db_writer(conn, table_name, table_schema,
buff_size=10) as w:
for row in values:
w.write(row)
field_names, data = execute(conn, self.select_statement)
self.assertEqual(table_schema, field_names)
self.assertEqual(values, data)
class TestGenerator(TestCase):
create_statement = "create table test_table_float_fea " \
"(features float, label int)"
drop_statement = "drop table if exists test_table_float_fea"
insert_statement = "insert into test_table_float_fea (features,label)" \
" values(1.0, 0), (2.0, 1)"
@unittest.skipUnless(testing.get_driver() == "mysql",
"skip non mysql tests")
def test_generator(self):
conn = connect(testing.get_datasource())
# prepare test data
conn.execute(self.drop_statement)
conn.execute(self.create_statement)
conn.execute(self.insert_statement)
column_name_to_type = {
"features": {
"feature_name": "features",
"delimiter": "",
"dtype": "float32",
"is_sparse": False,
"shape": []
}
}
label_meta = {"feature_name": "label", "shape": [], "delimiter": ""}
gen = db_generator(conn, "SELECT * FROM test_table_float_fea",
label_meta)
idx = 0
for row, label in gen():
features = read_features_from_row(row, ["features"], ["features"],
column_name_to_type)
d = (features, label)
if idx == 0:
self.assertEqual(d, (((1.0, ), ), 0))
elif idx == 1:
self.assertEqual(d, (((2.0, ), ), 1))
idx += 1
self.assertEqual(idx, 2)
@unittest.skipUnless(testing.get_driver() == "mysql",
"skip non mysql tests")
def test_generate_fetch_size(self):
label_meta = {"feature_name": "label", "shape": [], "delimiter": ""}
gen = db_generator(testing.get_singleton_db_connection(),
'SELECT * FROM iris.train limit 10', label_meta)
self.assertEqual(len([g for g in gen()]), 10)
class TestConnectWithDataSource(TestCase):
def test_kv_feature_column(self):
feature_spec = {
"name": "kv_feature_name",
"is_sparse": True,
"format": "kv",
"dtype": "float",
"shape": [10],
}
raw_val = "0:1 3:4 4:6"
indices, values, shape = read_feature(raw_val, feature_spec,
feature_spec["name"])
self.assertTrue(np.array_equal(indices, np.array([0, 3, 4],
dtype=int)))
self.assertTrue(np.array_equal(values, np.array([1, 4, 6], dtype=int)))
        self.assertTrue(np.array_equal(shape, np.array([10], dtype='float')))
#!/usr/bin/python
# Imports
import sys, os, re, time
import argparse
import pdb
import pickle
from itertools import *
# Science
import numpy as np
import scipy.stats as stats
import pandas as pd
# Plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.lines import Line2D
################################## FUNCTIONS ############################
def tracking_analysis_for_attribute_cutoff_bootstrapped(cycle_stats_df, category_stats, attribute, cutoff_criteria, cutoff, n_bootstrapping, save_dir):
'''
Function that computes tracking analysis per group, based on bootstrapping
It computes the Kolmogorov-Smirnov tests between group distributions
It computes the likelihood in low, mid and high extremes of the metric
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
category_stats: pandas dataframe, with information about user's tracking statistics for a given category
attribute: what specific tracking attribute to study: i.e., concatenation of the metric and the symptom to analyze
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
n_bootstrapping: Number of bootstrapped samples to use for the analysis
save_dir: path where to save plot
Output:
true_statistics: Dictionary with statistics for the observed cohort:
{'KS', 'p_val', 'prob_values_high', 'prob_values_low', 'ratios'}
bootstrap_statistics: Dictionary with statistics for the bootstrapped cohort:
{'KS': mean of the bootstrapped KS values, 'KS_0025': 2.5 percentile of the bootstrapped KS values, 'KS_0975': 97.5 percentile of the bootstrapped KS values,
'p_val': mean of the bootstrapped p_val values, 'p_val_0025': 2.5 percentile of the bootstrapped p_val values, 'p_val_0975': 97.5 percentile of the bootstrapped p_val values,
'prob_values_high': mean of the boostrapped probability values for the high volatility group,
'prob_values_high_0025': 2.5 percentile of the boostrapped probability values for the high volatility group,
'prob_values_high_0975': 97.5 percentile of the boostrapped probability values for the high volatility group,
'prob_values_low': mean of the boostrapped probability values for the low volatility group,
'prob_values_low_0025': 2.5 percentile of the boostrapped probability values for the low volatility group,
'prob_values_low_0975': 97.5 percentile of the boostrapped probability values for the low volatility group,
'ratios': mean of the bootstrapped ratios for the high to low volability groups
'ratios_0025': 2.5 percentile of the bootstrapped ratios for the high to low volability groups
'ratios_0975': 97.5 percentile of the bootstrapped ratios for the high to low volability groups}
'''
### Define
# Extreme likelihood ranges
extreme_bins=np.array([0,0.05,0.95,1])
# Histogram type, color and labels
hist_type='step'
colors = ['orange', 'c']
labels=['Highly variable', 'NOT highly variable']
# True separation of users into groups
all_users=np.unique(cycle_stats_df['user_id'])
true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
n_users_greater_than_cutoff=true_users_greater_than_cutoff.size
n_users_less_than_cutoff=true_users_less_than_cutoff.size
true_category_stats_users_greater_than_cutoff = category_stats[category_stats['user_id'].isin(true_users_greater_than_cutoff)]
true_category_stats_users_less_than_cutoff = category_stats[category_stats['user_id'].isin(true_users_less_than_cutoff)]
# Analysis for proportion of cycles metric
if attribute.startswith('proportion_cycles_'):
########### TRUE OBSERVERD STATISTICS ##########
# KS
true_KS, true_p_val = stats.ks_2samp(true_category_stats_users_greater_than_cutoff[attribute].dropna(), true_category_stats_users_less_than_cutoff[attribute].dropna())
# Counts on extremes
true_extreme_counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(true_category_stats_users_greater_than_cutoff[attribute].dropna(), bins=extreme_bins, density=True)
true_extreme_counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(true_category_stats_users_less_than_cutoff[attribute].dropna(), bins=extreme_bins, density=True)
# Probability values
true_prob_values_high=np.array([(true_extreme_counts_greater_than_cutoff[0]*0.05), (true_extreme_counts_greater_than_cutoff[1]*0.9), (true_extreme_counts_greater_than_cutoff[2]*0.05)])
true_prob_values_low=np.array([(true_extreme_counts_less_than_cutoff[0]*0.05), (true_extreme_counts_less_than_cutoff[1]*0.9), (true_extreme_counts_less_than_cutoff[2]*0.05)])
# Ratios
true_ratios=np.array([true_prob_values_high[0]/true_prob_values_low[0], true_prob_values_high[1]/true_prob_values_low[1], true_prob_values_high[2]/true_prob_values_low[2]])
# CDF
# Auto bins based on integer range of values
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(true_category_stats_users_greater_than_cutoff[attribute].dropna(), bins='auto', density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(true_category_stats_users_less_than_cutoff[attribute].dropna(), bins='auto', density=True)
all_bins=np.setdiff1d(bins_less_than_cutoff,bins_greater_than_cutoff)
true_counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(true_category_stats_users_greater_than_cutoff[attribute].dropna(), bins=all_bins, density=True)
true_counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(true_category_stats_users_less_than_cutoff[attribute].dropna(), bins=all_bins, density=True)
########### BOOTSTRAP BASED STATISTICS ##########
    # Compute the bootstrapped sufficient statistics
bootstrapped_KS=np.zeros(n_bootstrapping)
bootstrapped_p_val=np.zeros(n_bootstrapping)
bootstrapped_prob_values_high=np.zeros((n_bootstrapping, extreme_bins.size-1))
bootstrapped_prob_values_low=np.zeros((n_bootstrapping, extreme_bins.size-1))
bootstrapped_ratios=np.zeros((n_bootstrapping, extreme_bins.size-1))
bootstrapped_counts_greater_than_cutoff=np.zeros((n_bootstrapping, all_bins.size-1))
bootstrapped_counts_less_than_cutoff=np.zeros((n_bootstrapping, all_bins.size-1))
for n_bootstrap in np.arange(n_bootstrapping):
#print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))
# Bootstrapped sample indicators
users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_bootstrapping)
users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_bootstrapping)
# Bootstrapped data
category_stats_users_greater_than_cutoff = category_stats[category_stats['user_id'].isin(users_greater_than_cutoff)]
category_stats_users_less_than_cutoff = category_stats[category_stats['user_id'].isin(users_less_than_cutoff)]
# KS
bootstrapped_KS[n_bootstrap], bootstrapped_p_val[n_bootstrap] = stats.ks_2samp(category_stats_users_greater_than_cutoff[attribute].dropna(), category_stats_users_less_than_cutoff[attribute].dropna())
# Counts on extremes
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(category_stats_users_greater_than_cutoff[attribute].dropna(), bins=extreme_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(category_stats_users_less_than_cutoff[attribute].dropna(), bins=extreme_bins, density=True)
# Probability values
bootstrapped_prob_values_high[n_bootstrap]=np.array([(counts_greater_than_cutoff[0]*0.05), (counts_greater_than_cutoff[1]*0.9), (counts_greater_than_cutoff[2]*0.05)])
bootstrapped_prob_values_low[n_bootstrap]=np.array([(counts_less_than_cutoff[0]*0.05), (counts_less_than_cutoff[1]*0.9), (counts_less_than_cutoff[2]*0.05)])
# Ratios
bootstrapped_ratios[n_bootstrap]=bootstrapped_prob_values_high[n_bootstrap]/bootstrapped_prob_values_low[n_bootstrap]
# CDF, based on same bins as for true CDF
bootstrapped_counts_greater_than_cutoff[n_bootstrap], bins_greater_than_cutoff = np.histogram(category_stats_users_greater_than_cutoff[attribute].dropna(), bins=all_bins, density=True)
bootstrapped_counts_less_than_cutoff[n_bootstrap], bins_less_than_cutoff = np.histogram(category_stats_users_less_than_cutoff[attribute].dropna(), bins=all_bins, density=True)
else:
raise ValueError('Analysis for attribute {} not implemented'.format(attribute))
# Print bootstrap results
print('*************************************************************************')
print('******** {0} KS={1:.3f} (p={2}) ***********'.format(
attribute, true_KS, true_p_val
))
print('******** {0} Bootstrapped KS={1:.3f}+/-{2:.3f} (p={3} (+/-{4}))***********'.format(
attribute, bootstrapped_KS.mean(), bootstrapped_KS.std(), bootstrapped_p_val.mean(), bootstrapped_p_val.std()
))
print('******** {0} Bootstrapped KS={1:.3f}({2:.3f},{3:.3f}) p={4} ({5},{6}))***********'.format(
attribute, bootstrapped_KS.mean(), np.percentile(bootstrapped_KS, 2.5, axis=0), np.percentile(bootstrapped_KS, 97.5, axis=0), bootstrapped_p_val.mean(), np.percentile(bootstrapped_p_val, 2.5, axis=0), np.percentile(bootstrapped_p_val, 97.5, axis=0)
))
    print('Bins \t\t\t & p < 0.05 \t\t & 0.05 \\leq p < 0.95 \t & 0.95 \\leq 1')
print('True ratio \t\t & {0:.3f} \t\t & {1:.3f} \t\t & {2:.3f}'.format(true_ratios[0],true_ratios[1],true_ratios[2]))
print('Bootstrapped ratio \t & {0:.3f}+/-{1:.3f} \t & {2:.3f}+/-{3:.3f} \t & {4:.3f}+/-{5:.3f}'.format(
bootstrapped_ratios.mean(axis=0)[0],bootstrapped_ratios.std(axis=0)[0],
bootstrapped_ratios.mean(axis=0)[1],bootstrapped_ratios.std(axis=0)[1],
bootstrapped_ratios.mean(axis=0)[2],bootstrapped_ratios.std(axis=0)[2]
))
print('Bootstrapped ratio \t & {0:.3f} ({1:.3f}, {2:.3f}) \t & {3:.3f} ({4:.3f}, {5:.3f}) \t & {6:.3f} ({7:.3f}, {8:.3f})'.format(
bootstrapped_ratios.mean(axis=0)[0], np.percentile(bootstrapped_ratios[:,0], 2.5, axis=0), np.percentile(bootstrapped_ratios[:,0], 97.5, axis=0),
        bootstrapped_ratios.mean(axis=0)[1], np.percentile(bootstrapped_ratios[:,1], 2.5, axis=0), np.percentile(bootstrapped_ratios[:,1], 97.5, axis=0),
        bootstrapped_ratios.mean(axis=0)[2], np.percentile(bootstrapped_ratios[:,2], 2.5, axis=0), np.percentile(bootstrapped_ratios[:,2], 97.5, axis=0)
    ))
import numpy as np
def eta(xx,c=1):
x = np.array(xx)/c
if np.isscalar(x):
y = np.zeros(1)
        x = np.array([x])
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Rouge metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.utils import rouge
import tensorflow as tf
class TestRouge2Metric(tf.test.TestCase):
"""Tests the rouge-2 metric."""
def testRouge2Identical(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
self.assertAllClose(rouge.rouge_n(hypotheses, references), 1.0, atol=1e-03)
def testRouge2Disjoint(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
[9, 10, 11, 12, 13, 14, 15, 16, 17, 0]])
self.assertEqual(rouge.rouge_n(hypotheses, references), 0.0)
def testRouge2PartialOverlap(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 9, 2, 3, 4, 5, 1, 10, 6, 7],
[1, 9, 2, 3, 4, 5, 1, 10, 6, 7]])
self.assertAllClose(rouge.rouge_n(hypotheses, references), 0.53, atol=1e-03)
class TestRougeLMetric(tf.test.TestCase):
"""Tests the rouge-l metric."""
def testRougeLIdentical(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
self.assertAllClose(
rouge.rouge_l_sentence_level(hypotheses, references), 1.0, atol=1e-03)
def testRougeLDisjoint(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
[9, 10, 11, 12, 13, 14, 15, 16, 17, 0]])
self.assertEqual(rouge.rouge_l_sentence_level(hypotheses, references), 0.0)
def testRougeLPartialOverlap(self):
hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0],
[1, 2, 3, 4, 5, 1, 6, 8, 7]])
references = np.array([[1, 9, 2, 3, 4, 5, 1, 10, 6, 7],
[1, 9, 2, 3, 4, 5, 1, 10, 6, 7]])
self.assertAllClose(
rouge.rouge_l_sentence_level(hypotheses, references), 0.837, atol=1e-03)
class TestRougeMetricsE2E(tf.test.TestCase):
"""Tests the rouge metrics end-to-end."""
def testRouge2MetricE2E(self):
vocab_size = 4
batch_size = 12
seq_length = 12
predictions = tf.one_hot(
        np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
        vocab_size)
'''
file: hum36m_dataloader.py
author: zhangxiong(<EMAIL>)
date: 2018_05_09
purpose: load hum3.6m data
'''
import sys
from torch.utils.data import Dataset, DataLoader
import os
import glob
import numpy as np
import random
import cv2
import json
import h5py
import torch
sys.path.append('./src')
from util import calc_aabb, cut_image, flip_image, draw_lsp_14kp__bone, rectangle_intersect, get_rectangle_intersect_ratio, convert_image_by_pixformat_normalize, reflect_pose, reflect_lsp_kp
from config import args
from timer import Clock
class hum36m_dataloader(Dataset):
def __init__(self, data_set_path, use_crop, scale_range, use_flip, min_pts_required, pix_format = 'NHWC', normalize = False, flip_prob = 0.3):
self.data_folder = data_set_path
self.use_crop = use_crop
self.scale_range = scale_range
self.use_flip = use_flip
self.flip_prob = flip_prob
self.min_pts_required = min_pts_required
self.pix_format = pix_format
self.normalize = normalize
self._load_data_set()
def _load_data_set(self):
clk = Clock()
self.images = []
self.kp2ds = []
self.boxs = []
self.kp3ds = []
self.shapes = []
self.poses = []
print('start loading hum3.6m data.')
anno_file_path = os.path.join(self.data_folder, 'annot.h5')
        with h5py.File(anno_file_path, 'r') as fp:
total_kp2d = np.array(fp['gt2d'])
total_kp3d = np.array(fp['gt3d'])
total_shap = np.array(fp['shape'])
total_pose = np.array(fp['pose'])
total_image_names = np.array(fp['imagename'])
assert len(total_kp2d) == len(total_kp3d) and len(total_kp2d) == len(total_image_names) and \
len(total_kp2d) == len(total_shap) and len(total_kp2d) == len(total_pose)
l = len(total_kp2d)
def _collect_valid_pts(pts):
r = []
for pt in pts:
if pt[2] != 0:
r.append(pt)
return r
for index in range(l):
kp2d = total_kp2d[index].reshape((-1, 3))
if np.sum(kp2d[:, 2]) < self.min_pts_required:
continue
lt, rb, v = calc_aabb(_collect_valid_pts(kp2d))
            self.kp2ds.append(np.array(kp2d.copy(), dtype=np.float64))
self.boxs.append((lt, rb))
self.kp3ds.append(total_kp3d[index].copy().reshape(-1, 3))
self.shapes.append(total_shap[index].copy())
self.poses.append(total_pose[index].copy())
self.images.append(os.path.join(self.data_folder, 'image') + total_image_names[index].decode())
print('finished load hum3.6m data, total {} samples'.format(len(self.kp3ds)))
clk.stop()
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_path = self.images[index]
kps = self.kp2ds[index].copy()
box = self.boxs[index]
kp_3d = self.kp3ds[index].copy()
        scale = np.random.rand(4)
"""
Author =
"""
from __future__ import division, print_function
from os.path import isfile, join
import numpy as np
import fitsio
import datapath
import specutils
from glob import glob
_spplate_hdunames = ('flux','ivar','andmask','ormask','disp','plugmap','sky', 'loglam', )
def spplate_filename(platein, mjdin, path):
"""Name for a spPlate file
"""
plate = np.ravel(platein)
mjd = np.ravel(mjdin)
if plate.size != 1 or mjd.size !=1:
raise ValueError("I can only take one plate and one mjd.")
return join(path,"spPlate-{0:04d}-{1:05d}.fits".format(int(plate), int(mjd)))
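# Example (plate/mjd values illustrative): spplate_filename(3586, 55181, "/data")
# returns "/data/spPlate-3586-55181.fits".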
# http://data.sdss3.org/datamodel/files/SPECTRO_REDUX/RUN2D/PLATE4/spPlate.html
def read_spec(platein, mjdin, fiberin, path, output):
"""Read spectra from a *single* spPlate file
a simpler, hopefully better version of readspec in idlspec2d/pydl
requiring mjd and path so that one knows what they are doing
output should be a tuple, e.g. ('flux', 'ivar',)
"""
# check desired output
if type(output) != tuple:
raise TypeError("output should be a tuple.")
for thishdu in output:
if type(thishdu) != str:
raise TypeError("hdunames in the output should be strings.")
if thishdu not in _spplate_hdunames:
raise ValueError("{0} is not in the hdu list {1}".format(thishdu, _spplate_hdunames))
# check input
plate = np.ravel(platein)
mjd = np.ravel(mjdin)
if plate.size != 1 or mjd.size !=1:
raise ValueError("I can only take one plate and one mjd.")
# get filename
filename = spplate_filename(plate, mjd, path)
if not isfile(filename):
raise ValueError("I can't find this file {0}.".format(filename))
#print(filename)
spplate_fits = fitsio.FITS(filename)
hdr = spplate_fits[0].read_header()
fiber = np.ravel(fiberin) - 1
if np.amin(fiber)<0 or np.amax(fiber)>=hdr['naxis2']:
raise ValueError("Fiber ID cannot be smaller than 1 or larger than {0}.".format(hdr['naxis2']))
# output, a dictionary
spplate_data = dict()
for thishdu in output:
if thishdu == "loglam":
c0 = hdr['coeff0']
c1 = hdr['coeff1']
npix = hdr['naxis1']
# loglam vector is the same for a given plate
spplate_data[thishdu] = c0+c1*np.arange(npix, dtype='d')
else:
index = _spplate_hdunames.index(thishdu)
#spplate_data[thishdu] = spplate_fits[index].read(rows=fiber.tolist)
# read in the whole image for now instead of a for loop to read in row by row
spplate_data[thishdu] = (spplate_fits[index].read())[fiber,:] if fiber.size>1 else np.ravel((spplate_fits[index].read())[fiber,:])
spplate_fits.close()
return spplate_data
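# Hedged usage sketch (plate, mjd, fiber and path are made up):
# spec = read_spec(3586, 55181, 987, "/data/boss", output=("flux", "ivar", "loglam"))
# spec["flux"] and spec["ivar"] hold the fiber's spectrum, while spec["loglam"]
# is the shared log10-wavelength grid reconstructed from the header coefficients.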
def load_interp_spec(objs, newloglam, path, rest=False):
"""Read spectra of a list of objects and interpolate them onto the same wavelength grid
objs : a structured array with 'plate', 'mjd', 'fiber'
newloglam : desired wavelength grid in logarithmic scale
path :
"""
# check objs, newloglam
objs = np.ravel(objs)
# output:
flux = np.zeros((objs.size, newloglam.size))
    ivar = np.zeros((objs.size, newloglam.size))
#-----------------------------------------------------------------------------
# Name: SParameter.py
# Purpose: Tools to analyze SParameter Data
# Author: <NAME>
# Created: 4/13/2016
# License: MIT License
#-----------------------------------------------------------------------------
""" Sparameter is a module with tools for analyzing Scattering parameter data. It contains functions
for comparing, applying corrections, uncertainty analysis and plotting scattering parameters.
see also <a href="./NISTUncertainty.m.html">NISTUncertainty</a>
Examples
--------
#!python
    >>> test_compare_s2p_plots()
<h3><a href="../../../Examples/html/Applying_Calibration_Example.html">Applying a Correction Example</a></h3>
<h3><a href="../../../Examples/html/Calrep_Example.html">Using the Python Verison of Calrep</a></h3>
<h3><a href="../../../Examples/html/Creating_Comparing_Reference_Curves_MUFModels.html">Analysis of files made by the
NIST Microwave Uncertainty Framework</a></h3>
Requirements
------------
+ [sys](https://docs.python.org/2/library/sys.html)
+ [os](https://docs.python.org/2/library/os.html)
+ [re](https://docs.python.org/2/library/re.html)
+ [datetime](https://docs.python.org/2/library/datetime.html)
+ [math](https://docs.python.org/2/library/math.html)
+ [cmath](https://docs.python.org/2/library/cmath.html)
+ [numpy](https://docs.scipy.org/doc/)
+ [scipy](https://docs.scipy.org/doc/)
+ [pandas](http://pandas.pydata.org/)
+ [matplotlib](http://matplotlib.org/)
+ [pyMez](https://github.com/aricsanders/pyMez)
Help
---------------
<a href="./index.html">`pyMez.Code.Analysis`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples Home</a> |
<a href="../../../Reference_Index.html">Index</a>
</div>
"""
#-----------------------------------------------------------------------------
# Standard Imports
import os
import re
import datetime
import sys
import cmath
import math
#-----------------------------------------------------------------------------
# Third Party Imports
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))
try:
    import numpy as np
except:
    # numpy failed to import; bind a stub class so that later references to
    # np.ndarray do not raise NameError (the original assigned to an undefined name)
    class np(object):
        ndarray = 'np.ndarray'
    print("Numpy was not imported")
try:
import pandas
except:
print("Pandas was not imported")
pass
try:
from scipy.stats.mstats import gmean
except:
print("The function gmean from the package scipy.stats.mstats did not import correctly ")
try:
from statsmodels.robust.scale import mad
except:
print("The function mad from the package statsmodels.robust.scale did not import correctly ")
try:
#Todo: this could lead to a cyclic dependency, it really should import only the models it analyzes
#Todo: If analysis is to be in the top import, none of the models should rely on it
#import pyMez.Code.DataHandlers.NISTModels
from Code.DataHandlers.NISTModels import *
from Code.DataHandlers.TouchstoneModels import *
from Code.DataHandlers.GeneralModels import *
from Code.Analysis.NISTUncertainty import *
from Code.DataHandlers.Translations import *
#from pyMez import *
except:
print("The subpackage pyMez.Code.DataHandlers did not import properly,"
"please check that it is on the python path and that unit tests passed")
raise
try:
import matplotlib.pyplot as plt
except:
print("The module matplotlib was not found,"
"please put it on the python path")
#-----------------------------------------------------------------------------
# Module Constants
# Does this belong in tests or a Data folder
#Todo: This should not be here..
ONE_PORT_DUT=os.path.join(os.path.dirname(os.path.realpath(__file__)),'Tests')
#-----------------------------------------------------------------------------
# Module Functions
def cascade(s1,s2):
"""Cascade returns the cascaded sparameters of s1 and s2. s1 and s2 should be in complex list form
[[f,S11,S12,S21,S22]...] and the returned sparameters will be in the same format. Assumes that s1,s2 have the
same frequencies. If 1-S2_22*S1_11 is zero we add a small non zero real part or loss."""
out_sparameters=[]
for row_index,row in enumerate(s1):
[f1,S1_11,S1_12,S1_21,S1_22]=row
[f2,S2_11,S2_12,S2_21,S2_22]=s2[row_index]
if f1!=f2:
raise TypeError("Frequencies do not match! F lists must be the same")
denominator=(1-S1_22*S2_11)
if denominator ==complex(0,0):
denominator=complex(10**-20,0)
S11=S1_11+S2_11*(S1_12*S1_21)/denominator
S12=S1_12*S2_12/(denominator)
S21=S1_21*S2_21/denominator
S22=S2_22+S1_22*(S2_12*S2_21)/denominator
new_row=[f1,S11,S12,S21,S22]
out_sparameters.append(new_row)
return out_sparameters
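# Illustrative sanity check (numbers made up): cascading two ideal, matched thru
# sections should return another ideal thru at each frequency:
# thru = [[1e9, 0j, 1 + 0j, 1 + 0j, 0j]]
# cascade(thru, thru)[0][1:] == [0j, (1 + 0j), (1 + 0j), 0j]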
def add_white_noise_s2p(s2p_model,noise_level=.0005):
"""Adds white noise to a s2p in RI format and returns a new s2p with the noise added to each real and imaginary component"""
s2p_model.change_data_format("RI")
s2p_data=s2p_model.data[:]
noisy_data=[]
for row in s2p_data:
new_row=[row[0]]
sparameters=np.array(row[1:])+np.random.normal(loc=0,scale=noise_level,size=len(row[1:]))
new_row=new_row+sparameters.tolist()
noisy_data.append(new_row)
options=s2p_model.options.copy()
options["file_path"]=None
options["data"]=noisy_data
options["sparameter_complex"]=[]
noisy_s2p=S2PV1(**options)
return noisy_s2p
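# Hedged usage sketch (file name hypothetical):
# noisy = add_white_noise_s2p(S2PV1("thru.s2p"), noise_level=.001)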
def frequency_model_collapse_multiple_measurements(model, **options):
"""Returns a model with a single set of frequencies. Default is to average values together
but geometric mean, std, variance, rss, mad and median are options.
Geometric means of odd number of negative values fails"""
if type(model) in [pandas.DataFrame]:
model_1 = DataFrame_to_AsciiDataTable(model)
defaults = {"method": "mean"}
# load other options from model
for option, value in model.options.items():
if not re.search('begin_line|end_line', option):
defaults[option] = value
for element in model.elements:
if model.__dict__[element]:
if re.search("meta", element, re.IGNORECASE):
defaults["metadata"] = model.metadata.copy()
else:
defaults[element] = model.__dict__[element][:]
# We need to preserve the frequency column some how
collapse_options = {}
for key, value in defaults.items():
collapse_options[key] = value
for key, value in options.items():
collapse_options[key] = value
unique_frequency_list = sorted(list(set(model["Frequency"])))
frequency_selector = model.column_names.index("Frequency")
out_data = []
for index, frequency in enumerate(unique_frequency_list):
data_row = [x for x in model.data[:] if x[frequency_selector] == frequency]
if re.search('mean|av', collapse_options["method"], re.IGNORECASE):
new_row = np.mean(np.array(data_row), axis=0).tolist()
elif re.search('median', collapse_options["method"], re.IGNORECASE):
new_row = np.median(np.array(data_row), axis=0).tolist()
elif re.search('geometric', collapse_options["method"], re.IGNORECASE):
new_row = gmean(np.array(data_row), axis=0).tolist()
elif re.search('st', collapse_options["method"], re.IGNORECASE):
new_row = np.std(np.array(data_row), axis=0).tolist()
elif re.search('var', collapse_options["method"], re.IGNORECASE):
new_row = np.var(np.array(data_row), axis=0, dtype=np.float64).tolist()
elif re.search('rms', collapse_options["method"], re.IGNORECASE):
new_row = np.sqrt(np.mean(np.square(np.array(data_row)), axis=0, dtype=np.float64)).tolist()
elif re.search('rss', collapse_options["method"], re.IGNORECASE):
new_row = np.sqrt(np.sum(np.square(np.array(data_row)), axis=0, dtype=np.float64)).tolist()
elif re.search('mad', collapse_options["method"], re.IGNORECASE):
new_row = mad(np.array(data_row), axis=0).tolist()
new_row[frequency_selector]=frequency
out_data.append(new_row)
collapse_options["data"] = out_data
if collapse_options["specific_descriptor"]:
collapse_options["specific_descriptor"] = collapse_options["method"] + "_" + \
collapse_options["specific_descriptor"]
resulting_model = AsciiDataTable(None, **collapse_options)
return resulting_model
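# Hedged usage sketch (table name hypothetical): collapse repeated sweeps of the
# same device into a single median value per frequency point:
# median_model = frequency_model_collapse_multiple_measurements(repeated_table, method="median")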
def frequency_model_difference(model_1, model_2, **options):
"""Takes the difference of two models that both have frequency and a similar set of columns. Returns an object that is
a list of [[frequency,column_1,..column_n],...] where columns are the same in the models. If a particular subset of
columns is desired use columns=["Frequency","magS11] models can be any subclass of AsciiDataTable, SNP, or
pandas.DataFrame, if a column is a non-numeric type it drops it. The frequency list should be unique
(no multiple frequencies) for at least one model"""
# Set up defaults and pass options
defaults = {"columns": "all", "interpolate": False, "average": True}
difference_options = {}
for key, value in defaults.items():
difference_options[key] = value
for key, value in options.items():
difference_options[key] = value
# first check type, if it is a panadas data frame a little conversion is needed, else is for all other models
if type(model_1) in [pandas.DataFrame]:
model_1 = DataFrame_to_AsciiDataTable(model_1)
if type(model_2) in [pandas.DataFrame]:
model_2 = DataFrame_to_AsciiDataTable(model_2)
# now start with a set of frequencies (unique values from both)
frequency_set_1 = set(model_1["Frequency"])
frequency_set_2 = set(model_2["Frequency"])
model_2_frequency_selector = model_2.column_names.index('Frequency')
column_names_set_1 = set(model_1.column_names)
column_names_set_2 = set(model_2.column_names)
# All points must be in the intersection to be used
frequency_intersection = list(frequency_set_1.intersection(frequency_set_2))
column_names_intersection = list(column_names_set_1.intersection(column_names_set_2))
if not frequency_intersection:
print("The models do not have any frequency points in common")
return None
new_column_names = ["Frequency"]
column_types=['float']
for column_index, column in enumerate(model_1.column_names):
if column in column_names_intersection and column not in ["Frequency"]:
new_column_names.append(column)
column_types.append(model_1.options["column_types"][column_index])
difference_data = []
for row_index, frequency in enumerate(model_1["Frequency"]):
new_row = [frequency]
if frequency in frequency_intersection:
            model_2_frequency_row = list(filter(lambda x: x[model_2_frequency_selector] == frequency, model_2.data))[0]
# print("{0} is {1}".format("model_2_frequency_row",model_2_frequency_row))
for column_index, column in enumerate(model_1.column_names):
if column in column_names_intersection and column not in ["Frequency"]:
model_2_column_selector = model_2.column_names.index(column)
if re.search('int|float',
model_1.options["column_types"][column_index],
re.IGNORECASE) and re.search('int|float',
model_2.options["column_types"][model_2_column_selector],
re.IGNORECASE):
new_row.append(
model_1.data[row_index][column_index] - model_2_frequency_row[model_2_column_selector])
# Print("New Column Names are {0}".format(new_column_names))
elif difference_options["columns"] in ["all"]:
new_row.append(model_1.data[row_index][column_index])
difference_data.append(new_row)
difference_options["column_names"] = new_column_names
# print("New Column Names are {0}".format(new_column_names))
difference_options["data"] = difference_data
difference_options["column_types"]=column_types
#print column_types
result = AsciiDataTable(None, **difference_options)
return result
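# Hedged usage sketch (models hypothetical): difference of two measurements of
# the same DUT, restricted to shared frequencies and shared numeric columns:
# delta = frequency_model_difference(measurement_1, measurement_2)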
def create_monte_carlo_reference_curve(monte_carlo_directory, **options):
"""Creates a standard curve from a montecarlo directory (from MUF). The standard curve
has a mean or median and a standard deviation for the uncertainty"""
defaults = {"method": "mean", "format": "RI", "filter": "s\d+p"}
reference_options = {}
for key, value in defaults.items():
reference_options[key] = value
for key, value in options.items():
reference_options[key] = value
file_names = os.listdir(monte_carlo_directory)
filtered_file_names = []
for file_name in file_names[:]:
if re.search(reference_options["filter"], file_name, re.IGNORECASE):
filtered_file_names.append(file_name)
file_names = filtered_file_names
# print file_names
initial_file = SNP(os.path.join(monte_carlo_directory, file_names[0]))
initial_file.change_data_format(reference_options["format"])
combined_table = Snp_to_AsciiDataTable(initial_file)
for file_name in file_names[1:]:
snp_file = SNP(os.path.join(monte_carlo_directory, file_name))
snp_file.change_data_format(reference_options["format"])
table = Snp_to_AsciiDataTable(snp_file)
combined_table + table
mean_table = frequency_model_collapse_multiple_measurements(combined_table, method=reference_options["method"])
standard_deviation = frequency_model_collapse_multiple_measurements(combined_table,
method='std')
new_column_names = ['Frequency'] + ['u' + name for name in standard_deviation.column_names[1:]]
standard_deviation.column_names = new_column_names
reference_curve = ascii_data_table_join("Frequency", mean_table, standard_deviation)
reference_curve.options["value_column_names"] = mean_table.column_names[1:]
reference_curve.options["uncertainty_column_names"] = new_column_names[1:]
return reference_curve
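# Hedged usage sketch (directory name hypothetical):
# reference = create_monte_carlo_reference_curve("./DUT/MonteCarlo", method="mean", format="MA")
# plot_reference_curve(reference)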
def create_sensitivity_reference_curve(sensitivity_directory,nominal_file_path="../DUT_0.s2p",**options):
"""Creates a standard curve from a sensitivity_directory usually called Covariance(from MUF). The standard curve
has a mean or median and a RMS variance from the nominal value for the uncertainty"""
defaults = {"format": "RI", "filter": "s\d+p"}
reference_options = {}
for key, value in defaults.items():
reference_options[key] = value
for key, value in options.items():
reference_options[key] = value
file_names = os.listdir(sensitivity_directory)
filtered_file_names = []
for file_name in file_names[:]:
if re.search(reference_options["filter"], file_name, re.IGNORECASE):
filtered_file_names.append(file_name)
file_names = filtered_file_names
    nominal_file = SNP(os.path.join(sensitivity_directory, nominal_file_path))
nominal_file.change_data_format(reference_options["format"])
initial_file = SNP(os.path.join(sensitivity_directory, file_names[0]))
initial_file.change_data_format(reference_options["format"])
    initial_difference = frequency_model_difference(nominal_file, initial_file)
combined_table = initial_difference
for file_name in file_names[1:]:
snp_file = SNP(os.path.join(sensitivity_directory, file_name))
snp_file.change_data_format(reference_options["format"])
        difference = frequency_model_difference(nominal_file, snp_file)
combined_table + difference
variance = frequency_model_collapse_multiple_measurements(combined_table,
method='rss')
new_column_names = ['Frequency'] + ['u' + name for name in variance.column_names[1:]]
mean_table=Snp_to_AsciiDataTable(nominal_file)
variance.column_names = new_column_names
reference_curve = ascii_data_table_join("Frequency", mean_table, variance)
reference_curve.options["value_column_names"] = mean_table.column_names[1:]
reference_curve.options["uncertainty_column_names"] = new_column_names[1:]
return reference_curve
def plot_reference_curve(reference_curve, **options):
"""Plots a frequency based reference curve by using the options
value_column_names and uncertainty_column_names."""
defaults = {"display_legend": False,
"save_plot": False,
"directory": os.getcwd(),
"specific_descriptor": "Reference_Curve",
"general_descriptor": "Plot",
"file_name": None,
"plots_per_column": 2,
"plot_format": 'b-',
"fill_color": 'k',
"fill_opacity": .25,
"fill_edge_color": 'k',
"plot_size": (8, 10),
"dpi": 80,
"independent_axis_column_name": "Frequency",
"share_x": "col"}
plot_options = {}
for key, value in defaults.items():
plot_options[key] = value
for key, value in options.items():
plot_options[key] = value
value_columns = reference_curve.options["value_column_names"]
uncertainty_columns = reference_curve.options["uncertainty_column_names"]
number_plots = len(value_columns)
number_columns = int(plot_options["plots_per_column"])
number_rows = int(round(float(number_plots) / float(number_columns)))
fig, reference_axes = plt.subplots(nrows=number_rows, ncols=number_columns,
sharex=plot_options["share_x"],
figsize=plot_options["plot_size"],
dpi=plot_options["dpi"])
x_data = reference_curve[plot_options["independent_axis_column_name"]]
for axes_index, ax in enumerate(reference_axes.flat):
y_data = np.array(reference_curve[value_columns[axes_index]])
error = np.array(reference_curve[uncertainty_columns[axes_index]])
ax.plot(x_data, y_data, plot_options["plot_format"])
ax.fill_between(x_data, y_data - error, y_data + error,
color=plot_options["fill_color"],
alpha=plot_options["fill_opacity"],
edgecolor=plot_options["fill_edge_color"])
ax.set_title(value_columns[axes_index])
plt.tight_layout()
# Dealing with the save option
if plot_options["file_name"] is None:
file_name = auto_name(specific_descriptor=plot_options["specific_descriptor"],
general_descriptor=plot_options["general_descriptor"],
directory=plot_options["directory"], extension='png', padding=3)
else:
file_name = plot_options["file_name"]
if plot_options["save_plot"]:
# print file_name
plt.savefig(os.path.join(plot_options["directory"], file_name))
else:
plt.show()
return fig
def plot_reference_curve_comparison(reference_curve_list, **options):
    """Plots a list of frequency based reference curves
    by using the options value_column_names and uncertainty_column_names.
    Accepts the same options as plot_reference_curve, plus a "labels" list for the legend."""
defaults = {"display_legend": False,
"save_plot": False,
"directory": os.getcwd(),
"specific_descriptor": "Reference_Curve",
"general_descriptor": "Plot",
"file_name": None,
"plots_per_column": 2,
"plot_format": '-',
"fill_color": 'k',
"fill_opacity": .25,
"fill_edge_color": 'k',
"plot_size": (8, 10),
"dpi": 80,
"independent_axis_column_name": "Frequency",
"share_x": "col",
"labels":None}
plot_options = {}
for key, value in defaults.items():
plot_options[key] = value
for key, value in options.items():
plot_options[key] = value
if plot_options["labels"]:
labels=plot_options["labels"]
else:
labels=[x.path for x in reference_curve_list]
value_columns = reference_curve_list[0].options["value_column_names"]
uncertainty_columns = reference_curve_list[0].options["uncertainty_column_names"]
number_plots = len(value_columns)
number_columns = int(plot_options["plots_per_column"])
number_rows = int(round(float(number_plots) / float(number_columns)))
fig, reference_axes = plt.subplots(nrows=number_rows, ncols=number_columns,
sharex=plot_options["share_x"],
figsize=plot_options["plot_size"],
dpi=plot_options["dpi"])
for index,reference_curve in enumerate(reference_curve_list[:]):
value_columns = reference_curve.options["value_column_names"]
uncertainty_columns = reference_curve.options["uncertainty_column_names"]
x_data = reference_curve[plot_options["independent_axis_column_name"]]
for axes_index, ax in enumerate(reference_axes.flat):
y_data = np.array(reference_curve[value_columns[axes_index]])
error = np.array(reference_curve[uncertainty_columns[axes_index]])
ax.plot(x_data, y_data, plot_options["plot_format"],label=labels[index])
ax.fill_between(x_data, y_data - error, y_data + error,
color=plot_options["fill_color"],
alpha=plot_options["fill_opacity"],
edgecolor=plot_options["fill_edge_color"])
ax.set_title(value_columns[axes_index])
plt.tight_layout()
if plot_options["display_legend"]:
plt.legend()
# Dealing with the save option
if plot_options["file_name"] is None:
file_name = auto_name(specific_descriptor=plot_options["specific_descriptor"],
general_descriptor=plot_options["general_descriptor"],
directory=plot_options["directory"], extension='png', padding=3)
else:
file_name = plot_options["file_name"]
if plot_options["save_plot"]:
# print file_name
plt.savefig(os.path.join(plot_options["directory"], file_name))
else:
plt.show()
return fig
def calrep(raw_model,**options):
    """Performs the calrep analysis routine on a raw data format (such as OnePortRawModel, TwoPortRawModel,
    or PowerRawModel). Differs from the HP BASIC program in that it keeps the metadata. Needs to be checked;
    it returns 4 error terms for power and does not calculate all the same rows for power. The expansion factor
    is set to 2, and the raw model must have the attribute raw_model.metadata["Connector_Type_Measurement"]
    defined. If the columns passed in raw_model do not have repeat values or contain text, the result will set
    the connect uncertainty to zero."""
try:
mean_file=frequency_model_collapse_multiple_measurements(raw_model)
except:
mean_file=raw_model
try:
standard_deviation_file=frequency_model_collapse_multiple_measurements(raw_model,method="std")
except:
std_data=[]
for row in mean_file.data:
new_row=[]
for column in mean_file.data[0]:
new_row.append(0)
std_data.append(new_row)
standard_deviation_file=AsciiDataTable(None,column_names=raw_model.column_names,
data=std_data,column_types=raw_model.options["column_types"])
if "Direction" in mean_file.column_names and "Connect" in mean_file.column_names:
mean_file.remove_column("Direction")
mean_file.remove_column("Connect")
if "Direction" in standard_deviation_file.column_names and "Connect" in standard_deviation_file.column_names:
standard_deviation_file.remove_column("Direction")
standard_deviation_file.remove_column("Connect")
new_data=[]
new_column_names=[]
expansion_factor=2
frequency_index=mean_file.column_names.index("Frequency")
for row_index,row in enumerate(mean_file.data[:]):
new_data_row=[]
for column_index,column_name in enumerate(mean_file.column_names[:]):
if re.search("frequency",column_name,re.IGNORECASE):
if row_index==0:
new_column_names.append("Frequency")
new_data_row.append(row[column_index])
else:
if re.search("mag",column_name,re.IGNORECASE):
error_selector=0
error_letter="M"
error_parameter=column_name.replace("mag","")
elif re.search("arg|phase",column_name,re.IGNORECASE):
error_selector=1
error_letter="A"
error_parameter=column_name.replace("arg","")
elif re.search("Eff",column_name,re.IGNORECASE):
error_selector=0
error_letter="E"
error_parameter=""
else:
error_selector=0
if row_index==0:
# If this is the first row build the column names list
new_column_names.append(column_name)
new_column_names.append("u"+error_letter+"b"+error_parameter)
new_column_names.append("u"+error_letter+"a"+error_parameter)
new_column_names.append("u"+error_letter+"d"+error_parameter)
new_column_names.append("u"+error_letter+"g"+error_parameter)
# Mean Value
new_data_row.append(row[column_index])
# Type B
ub=type_b(wr_connector_type=mean_file.metadata["Connector_Type_Measurement"],
frequency=row[frequency_index],parameter=column_name,magnitude=row[column_index],format="mag")
#print("{0} is {1}".format("ub",ub))
new_data_row.append(ub[error_selector])
# Type A or SNIST
ua=S_NIST(wr_connector_type=mean_file.metadata["Connector_Type_Measurement"],
frequency=row[frequency_index],parameter=column_name,magnitude=row[column_index],format="mag")
new_data_row.append(ua[error_selector])
# Standard Deviation
ud=standard_deviation_file.data[row_index][column_index]
new_data_row.append(ud)
# Total Uncertainty
#print(" ua is {0}, ub is {1} and ud is {2}".format(ua,ub,ud))
total_uncertainty=expansion_factor*math.sqrt(ua[error_selector]**2+ub[error_selector]**2+ud**2)
new_data_row.append(total_uncertainty)
new_data.append(new_data_row)
sorted_keys=sorted(mean_file.metadata.keys())
header=["{0} = {1}".format(key,mean_file.metadata[key]) for key in sorted_keys]
column_types=["float" for column in new_column_names]
#todo: Add value_column_names and uncertainty_column_names to conform to reference curve
calrep=AsciiDataTable(None,data=new_data,column_types=column_types,
column_names=new_column_names,header=header,
metadata=mean_file.metadata)
return calrep
def one_port_robin_comparison_plot(input_asc_file,input_res_file,**options):
    """one_port_robin_comparison_plot plots a one-port .asc file against a given .res file;
    use device_history=True in options to show the device history"""
defaults={"device_history":False,"mag_res":False}
plot_options={}
for key,value in defaults.items():
plot_options[key]=value
for key,value in options.items():
plot_options[key]=value
history=np.loadtxt(input_res_file,skiprows=1)
column_names=["Frequency",'magS11','argS11','magS11N','argS11N','UmagS11N','UargS11N']
options={"data":history.tolist(),"column_names":column_names,"column_types":['float' for column in column_names]}
history_table=AsciiDataTable(None,**options)
table=OnePortCalrepModel(input_asc_file)
if plot_options["device_history"]:
history_frame=pandas.read_csv(ONE_PORT_DUT)
device_history=history_frame[history_frame["Device_Id"]==table.header[0].rstrip().lstrip()]
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
ax0.errorbar(history_table.get_column('Frequency'),history_table.get_column('magS11N'),fmt='k--',
yerr=history_table.get_column('UmagS11N'),label="History")
ax0.errorbar(table.get_column('Frequency'),table.get_column('magS11'),
yerr=table.get_column('uMg'),fmt='ro',label="Current Measurement",alpha=.3)
if plot_options["device_history"]:
ax0.errorbar(device_history['Frequency'].tolist(),device_history['magS11'].tolist(),fmt='bs',
yerr=device_history['uMg'].tolist(),label="From .asc", alpha=.5)
if plot_options["mag_res"]:
        ax0.errorbar(history_table.get_column('Frequency'),history_table.get_column('magS11'),fmt='gx',
                     yerr=history_table.get_column('UmagS11N'),label="From mag in res")
ax0.set_title('Magnitude S11')
    ax1.errorbar(history_table.get_column('Frequency'),history_table.get_column('argS11N'),fmt='k--',
                 yerr=history_table.get_column('UargS11N'),label="History")
ax1.errorbar(table.get_column('Frequency'),table.get_column('arg'),
yerr=table.get_column('uAg'),fmt='ro',label="Current Measurement",alpha=.3)
if plot_options["device_history"]:
ax1.errorbar(device_history['Frequency'].tolist(),device_history['arg'].tolist(),fmt='bs',
yerr=device_history['uAg'].tolist(),label="From .asc", alpha=.5)
ax1.set_title('Phase S11')
ax0.legend(loc='lower left', shadow=True)
plt.show()
return fig
def two_port_swap_ports(complex_data):
"""Accepts data in [[frequency, S11, S21, S12, S22]..] format and returns
[[frequency, S22, S12, S21, S11]..]"""
out_data=[]
for row in complex_data:
[frequency, S11, S21, S12, S22]=row
new_row=[frequency, S22, S12, S21, S11]
out_data.append(new_row)
return out_data
def two_port_complex_to_matrix_form(complex_data):
"""two_port_complex_to_matrix_form takes a list of [[frequency,S11,S21,S12,S22],..] and
returns a list in the
form [[frequency,np.matrix([[S11,S12],[S21,S22]])]..], it is meant to prepare data for correction"""
out_list=[]
for row in complex_data:
frequency=row[0]
[S11,S21,S12,S22]=row[1:]
m=np.matrix([[S11,S12],[S21,S22]])
out_list.append([frequency,m])
return out_list
def two_port_matrix_to_complex_form(matrix_form_data):
"""two_port_matrix_to_complex_form takes a list of [[frequency,np.matrix([[S11,S12],[S21,S22]])]..]
and returns a list in the
form [[frequency,S11,S21,S12,S22],..] , it is meant to undo two_port_complex_to_matrix_form"""
out_list=[]
for row in matrix_form_data:
frequency=row[0]
m=row[1]
        [S11,S21,S12,S22]=[m[0,0],m[1,0],m[0,1],m[1,1]]
        # emit rows in the docstring's [frequency, S11, S21, S12, S22] order
        out_list.append([frequency,S11,S21,S12,S22])
return out_list
def invert_two_port_matrix_list(two_port_matrix_form):
"""invert_two_port_matrix_list inverts all elements in the list two_port_matrix_form,
which is in the format [[frequency,np.matrix([[S11,S12],[S21,S22]])]..] and returns a list
in [[frequency,inv(np.matrix([[S11,S12],[S21,S22]]))]..] format works on any list in the form [value, matrix]
"""
out_list=[]
for row in two_port_matrix_form:
frequency=row[0]
m=row[1]
m_inv=np.linalg.inv(m)
out_list.append([frequency,m_inv])
return out_list
def polar_average(complex_number_1,complex_number_2):
"""Averages 2 complex numbers in polar coordinates and returns a single complex number"""
polar_number_1=cmath.polar(complex_number_1)
polar_number_2=cmath.polar(complex_number_2)
average_length=(polar_number_1[0]+polar_number_2[0])/2.
average_phase=(polar_number_1[1]+polar_number_2[1])/2.
out_value=cmath.rect(average_length,average_phase)
return out_value
def polar_geometric_average(complex_number_1,complex_number_2):
    """Geometrically averages 2 complex numbers in polar coordinates (square root of the magnitude
    product, mean of the phases, rotated by -pi) and returns a single complex number"""
polar_number_1=cmath.polar(complex_number_1)
polar_number_2=cmath.polar(complex_number_2)
average_length=(polar_number_1[0]*polar_number_2[0])**.5
average_phase=(polar_number_1[1]+polar_number_2[1])/2
out_value=cmath.rect(average_length,average_phase-math.pi)
return out_value
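# A small sanity-check sketch (defined but never called; assumes the module-level
# cmath/math imports used above): the geometric polar average of 1 and 1j has the
# same magnitude and mean phase as the arithmetic polar average, but is rotated
# by -pi into the opposite half-plane.
def _example_polar_averages():
    a, b = complex(1, 0), complex(0, 1)
    average = polar_average(a, b)              # approximately 0.707 + 0.707j
    geometric = polar_geometric_average(a, b)  # approximately -0.707 - 0.707j
    assert abs(average - cmath.rect(1, math.pi / 4)) < 1e-12
    assert abs(geometric - cmath.rect(1, math.pi / 4 - math.pi)) < 1e-12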
def S_to_T(S_list):
"""Converts S-parameters into a T Matrix. Input form should be in frequency, np.matrix([[S11,S12],[S21,S22]])
format. Returns a list in [frequency, np.matrix] format """
    t_matrix=[]
for row in S_list:
frequency=row[0]
m=row[1]
T11=-np.linalg.det(m)/m[1,0]
T12=m[0,0]/m[1,0]
T21=-m[1,1]/m[1,0]
T22=1/m[1,0]
t_matrix.append([frequency,np.matrix([[T11,T12],[T21,T22]])])
return t_matrix
def T_to_S(T_list):
"""Converts T Matrix into S parameters. Input form should be in frequency, np.matrix([[T11,T12],[T21,T22]])
format. Returns a list in [frequency, np.matrix] format."""
S_list=[]
for row in T_list:
frequency=row[0]
m=row[1]
S11=m[0,1]/m[1,1]
S12=np.linalg.det(m)/m[1,1]
S21=1/m[1,1]
S22=-m[1,0]/m[1,1]
S_list.append([frequency,np.matrix([[S11,S12],[S21,S22]])])
return S_list
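# A minimal round-trip sanity check (a sketch, defined but never called): S_to_T
# followed by T_to_S reproduces the original S-matrix whenever S21 is nonzero,
# since the two conversions are exact algebraic inverses.
def _example_s_t_round_trip():
    s_row = [1.0e9, np.matrix([[0.10 + 0.02j, 0.05 + 0.01j],
                               [0.90 + 0.10j, 0.20 - 0.03j]])]
    s_back = T_to_S(S_to_T([s_row]))[0]
    assert np.allclose(np.asarray(s_row[1]), np.asarray(s_back[1]))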
def unwrap_phase(phase_list):
    """unwrap_phase returns an unwrapped phase list given a wrapped phase list;
    assumed units are degrees"""
unwrapped_phase_list=[]
phase_list_copy=phase_list[:]
i=1
n=0
while(i+1<len(phase_list)):
if abs(phase_list[i]-phase_list[i-1])>90:
if phase_list[i]-phase_list[i-1]>0:
n+=1
else:
n-=1
phase_list_copy[i]=phase_list_copy[i+1]-n*360
phase_list_copy[i+1]=phase_list_copy[i+1]-n*360
i+=1
return phase_list_copy
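# For reference, an equivalent degree-based unwrap can be built on numpy's
# np.unwrap; this is an alternative sketch, not a drop-in replacement guaranteed
# to match unwrap_phase on every input.
def _unwrap_phase_numpy(phase_list):
    return np.degrees(np.unwrap(np.radians(phase_list))).tolist()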
def correct_sparameters_eight_term(sparameters_complex,eight_term_correction,reciprocal=True):
    """Applies the eight term correction to sparameters_complex and returns
    a corrected complex list in the form of [[frequency,S11,S21,S12,S22],..]. The eight term
    correction should be in the form [[frequency,S1_11,S1_21,S1_12,S1_22,S2_11,S2_21,S2_12,S2_22]..]
    Use s2p.sparameter_complex as input."""
# first transform both lists to matrices
s2p_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
s1_list=[[row[0],row[1],row[2],row[3],row[4]] for row in eight_term_correction]
s2_list=[[row[0],row[5],row[6],row[7],row[8]] for row in eight_term_correction]
s1_matrix_list=two_port_complex_to_matrix_form(s1_list)
s2_matrix_list=two_port_complex_to_matrix_form(s2_list)
# now transform to T matrices
t_matrix_list=S_to_T(s2p_matrix_list)
x_matrix_list=S_to_T(s1_matrix_list)
y_matrix_list=S_to_T(s2_matrix_list)
# now invert x
x_inverse_matrix_list=invert_two_port_matrix_list(x_matrix_list)
y_inverse_matrix_list=invert_two_port_matrix_list(y_matrix_list)
# now apply the correction
t_corrected_list=[]
for index,row in enumerate(t_matrix_list):
frequency=row[0]
t_corrected=x_inverse_matrix_list[index][1]*row[1]*y_inverse_matrix_list[index][1]
t_corrected_list.append([frequency,t_corrected])
# now transform back to S
s_corrected_matrix_list =T_to_S(t_corrected_list)
# now put back into single row form
s_corrected_list=two_port_matrix_to_complex_form(s_corrected_matrix_list)
# now we take the geometric average and replace S12 and S21 with it
if reciprocal:
s_averaged_corrected=[]
phase_last=0
for row in s_corrected_list:
[frequency,S11,S21,S12,S22]=row
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
            # if the phase jumps by more than 90 degrees but less than 270, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
s_averaged_corrected.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
phase_last=cmath.phase(mean_S12_S21)
s_corrected_list=s_averaged_corrected
else:
pass
return s_corrected_list
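# Per frequency point, the eight-term de-embedding above computes
#     T_dut = X**-1 * T_measured * Y**-1
# where X and Y are the T-matrix forms of the two error boxes
# (the S1_* and S2_* halves of each correction row).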
def uncorrect_sparameters_eight_term(sparameters_complex,eight_term_correction,reciprocal=True):
"""Removes the eight term correction to sparameters_complex and returns
a uncorrected (reference plane is measurement)
complex list in the form of [[frequency,S11,S21,S12,S22],..]. The eight term
correction should be in the form [[frequency,S1_11,S1_21,S1_12,S1_22,S2_11,S2_21,S2_12,S2_22]..]
Use s2p.sparameter_complex as input."""
# first transform both lists to matrices
s2p_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
s1_list=[[row[0],row[1],row[2],row[3],row[4]] for row in eight_term_correction]
s2_list=[[row[0],row[5],row[6],row[7],row[8]] for row in eight_term_correction]
s1_matrix_list=two_port_complex_to_matrix_form(s1_list)
s2_matrix_list=two_port_complex_to_matrix_form(s2_list)
# now transform to T matrices
t_matrix_list=S_to_T(s2p_matrix_list)
x_matrix_list=S_to_T(s1_matrix_list)
y_matrix_list=S_to_T(s2_matrix_list)
# now apply the correction
t_uncorrected_list=[]
for index,row in enumerate(t_matrix_list):
frequency=row[0]
t_corrected=x_matrix_list[index][1]*row[1]*y_matrix_list[index][1]
t_uncorrected_list.append([frequency,t_corrected])
# now transform back to S
s_uncorrected_matrix_list =T_to_S(t_uncorrected_list)
# now put back into single row form
s_uncorrected_list=two_port_matrix_to_complex_form(s_uncorrected_matrix_list)
# now we take the geometric average and replace S12 and S21 with it
if reciprocal:
s_averaged_corrected=[]
phase_last=0
for row in s_uncorrected_list:
[frequency,S11,S21,S12,S22]=row
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
            # if the phase jumps by more than 90 degrees but less than 270, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
s_averaged_corrected.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
phase_last=cmath.phase(mean_S12_S21)
s_uncorrected_list=s_averaged_corrected
else:
pass
return s_uncorrected_list
def correct_sparameters_sixteen_term(sparameters_complex,sixteen_term_correction):
"""Applies the sixteen term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
    The sixteen term correction should be a list of
    [frequency, S11, S12, S13, S14, S21, S22, S23, S24, S31, S32, S33, S34, S41, S42, S43, S44] where S11, etc. are complex numbers.
    Designed to use S2P.sparameter_complex and SNP.sparameter_complex"""
# first create 4 separate matrix lists for 16 term correction
s1_matrix_list=[]
s2_matrix_list=[]
s3_matrix_list=[]
s4_matrix_list=[]
# Then populate them with the right values
for index,correction in enumerate(sixteen_term_correction):
[frequency, S11, S12, S13,S14,S21, S22,S23,S24,S31,S32,S33,S34,S41,S42,S43,S44]=correction
s1_matrix_list.append([frequency,np.matrix([[S11,S12],[S21,S22]])])
s2_matrix_list.append([frequency,np.matrix([[S13,S14],[S23,S24]])])
s3_matrix_list.append([frequency,np.matrix([[S31,S32],[S41,S42]])])
s4_matrix_list.append([frequency,np.matrix([[S33,S34],[S43,S44]])])
sparameter_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
# Apply the correction
sparameter_out=[]
for index,sparameter in enumerate(sparameter_matrix_list):
frequency=sparameter[0]
s_matrix=sparameter[1]
[s11_matrix,s12_matrix,s21_matrix,s22_matrix]=[s1_matrix_list[index][1],s2_matrix_list[index][1],
s3_matrix_list[index][1],s4_matrix_list[index][1]]
corrected_s_matrix=np.linalg.inv(s21_matrix*np.linalg.inv(s_matrix-s11_matrix)*s12_matrix+s22_matrix)
# This flips S12 and S21
sparameter_out.append([frequency,corrected_s_matrix[0,0],corrected_s_matrix[1,0],
corrected_s_matrix[0,1],corrected_s_matrix[1,1]])
return sparameter_out
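# Per frequency point, the sixteen-term correction above computes
#     S_dut = (S21_e * (S_m - S11_e)**-1 * S12_e + S22_e)**-1
# where S11_e, S12_e, S21_e and S22_e are the four 2x2 blocks of the
# 4x4 error adapter assembled from each correction row.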
def uncorrect_sparameters_sixteen_term(sparameters_complex,sixteen_term_correction):
"""Removes the sixteen term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
    The sixteen term correction should be a list of
    [frequency, S11, S12, S13, S14, S21, S22, S23, S24, S31, S32, S33, S34, S41, S42, S43, S44] where S11, etc. are complex numbers.
    Designed to use S2P.sparameter_complex and SNP.sparameter_complex.
Inverse of correct_sparameters_sixteen_term"""
# first create 4 separate matrix lists for 16 term correction
s1_matrix_list=[]
s2_matrix_list=[]
s3_matrix_list=[]
s4_matrix_list=[]
# Then populate them with the right values
for index,correction in enumerate(sixteen_term_correction):
[frequency, S11, S12, S13,S14,S21, S22,S23,S24,S31,S32,S33,S34,S41,S42,S43,S44]=correction
s1_matrix_list.append([frequency,np.matrix([[S11,S12],[S21,S22]])])
s2_matrix_list.append([frequency,np.matrix([[S13,S14],[S23,S24]])])
s3_matrix_list.append([frequency,np.matrix([[S31,S32],[S41,S42]])])
s4_matrix_list.append([frequency,np.matrix([[S33,S34],[S43,S44]])])
sparameter_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
# Apply the correction
sparameter_out=[]
for index,sparameter in enumerate(sparameter_matrix_list):
frequency=sparameter[0]
s_matrix=sparameter[1]
[s11_matrix,s12_matrix,s21_matrix,s22_matrix]=[s1_matrix_list[index][1],s2_matrix_list[index][1],
s3_matrix_list[index][1],s4_matrix_list[index][1]]
uncorrected_s_matrix=np.linalg.inv(np.linalg.inv(s21_matrix)*(np.linalg.inv(s_matrix)-s22_matrix)*\
np.linalg.inv(s12_matrix))+s11_matrix
# This flips S12 and S21
sparameter_out.append([frequency,uncorrected_s_matrix[0,0],uncorrected_s_matrix[1,0],
uncorrected_s_matrix[0,1],uncorrected_s_matrix[1,1]])
return sparameter_out
def correct_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
"""Applies the twelve term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
The twelve term correction should be a list of
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers"""
if len(sparameters_complex) != len(twelve_term_correction):
raise TypeError("s parameter and twelve term correction must be the same length")
sparameter_out=[]
phase_last=0.
for index,row in enumerate(sparameters_complex):
frequency=row[0]
Sm=np.matrix(row[1:]).reshape((2,2))
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
# frequency Edf Esf Erf Exf Elf Etf Edr Esr Err Exr Elr Etr.
# print [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]
# print Sm[0,0]
D =(1+(Sm[0,0]-Edf)*(Esf/Erf))*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf*Elr)/(Etf*Etr)
# print D
S11 =(Sm[0,0]-Edf)/(D*Erf)*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf)/(D*Etf*Etr)
S21 =((Sm[1,0]-Exr)/(D*Etf))*(1+(Sm[1,1]-Edr)*(Esr-Elf)/Err)
S12 = ((Sm[0,1]-Exf)/(D*Etr))*(1+(Sm[0,0]-Edf)*(Esf-Elr)/Erf)
S22 = (Sm[1,1]-Edr)/(D*Err)*(1+(Sm[0,0]-Edf)*(Esf/Erf))-(Sm[0,1]*Sm[1,0]*Elr)/(D*Etf*Etr)
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
        # if the phase jumps by more than 90 degrees but less than 270, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
if reciprocal:
sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
else:
sparameter_out.append([frequency,S11,S21,S12,S22])
phase_last=cmath.phase(mean_S12_S21)
return sparameter_out
def uncorrect_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
"""Removes the twelve term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
The twelve term correction should be a list of
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers"""
if len(sparameters_complex) != len(twelve_term_correction):
raise TypeError("s parameter and twelve term correction must be the same length")
sparameter_out=[]
phase_last=0.
for index,row in enumerate(sparameters_complex):
frequency=row[0]
Sa=np.matrix(row[1:]).reshape((2,2))
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
# frequency Edf Esf Erf Exf Elf Etf Edr Esr Err Exr Elr Etr.
# print [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]
# print Sm[0,0]
delta=Sa[0,0]*Sa[1,1]-Sa[0,1]*Sa[1,0]
# print D
S11 =Edf+(Erf)*(Sa[0,0]-Elf*delta)/(1-Esf*Sa[0,0]-Elf*Sa[1,1]+Esf*Elf*delta)
S21 =Etf*(Sa[1,0])/(1-Esf*Sa[0,0]-Elf*Sa[1,1]-Esf*Elf*delta)
S12 = Etr*(Sa[0,1])/(1-Elr*Sa[0,0]-Esr*Sa[1,1]-Esr*Elr*delta)
S22 = Edr+Err*(Sa[1,1]-Elr*delta)/(1-Elr*Sa[0,0]-Esr*Sa[1,1]-Esr*Elr*delta)
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
        # if the phase jumps by more than 90 degrees but less than 270, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
if reciprocal:
sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
else:
sparameter_out.append([frequency,S11,S21,S12,S22])
phase_last=cmath.phase(mean_S12_S21)
return sparameter_out
#TODO: Check that this works the way it should
def correct_sparameters(sparameters,correction,**options):
    """correct_sparameters tries to return a corrected set of sparameters given uncorrected sparameters
    and a correction. It accepts file names, pyMez classes,
    complex lists or a mixture, and returns the value in the form it was entered. The correction is assumed
    reciprocal unless reciprocal=False"""
defaults={"reciprocal":True,"output_type":None,"file_path":None}
correction_options={}
for key,value in defaults.items():
correction_options[key]=value
for key,value in options.items():
correction_options[key]=value
try:
# import and condition sparameters and correction
        if isinstance(sparameters, str):
            # Assume sparameters is given by file name
            sparameters_table=S2PV1(sparameters)
            sparameters=sparameters_table.sparameter_complex
            output_type='file'
        elif re.search('S2PV1',type(sparameters).__name__):
            output_type='S2PV1'
            sparameters=sparameters.sparameter_complex
        elif isinstance(sparameters, list):
            # check to see if it is a list of complex rows or a [frequency, matrix] list
            if isinstance(sparameters[0][1], complex):
                output_type='complex_list'
            # Handle frequency, matrix lists
            elif isinstance(sparameters[0][1], (np.ndarray, np.matrix)) and isinstance(sparameters[0][0], float):
                output_type='matrix_list'
                sparameters=two_port_matrix_to_complex_form(sparameters)
        # handle a bare matrix, which is not supported
        elif isinstance(sparameters, (np.ndarray, np.matrix)):
            output_type='matrix'
            raise TypeError("A bare matrix input is not supported; pass a [frequency, matrix] list instead")
        # Handle the correction types by the length of a single correction row
        if len(correction[0])==13:
            corrected_sparameters=correct_sparameters_twelve_term(sparameters,correction)
        elif len(correction[0])==17:
            corrected_sparameters=correct_sparameters_sixteen_term(sparameters,correction)
        elif len(correction[0])==9:
            corrected_sparameters=correct_sparameters_eight_term(sparameters,correction)
# Handle the output type using the derived one or the one entered as an option
if correction_options["output_type"] is None:
pass
else:
output_type=correction_options["output_type"]
if re.match('file',output_type, re.IGNORECASE):
output_table=S2PV1(correction_options["file_path"],sparameter_complex=corrected_sparameters)
output_table.save()
print(("Output was saved as {0}".format(output_table.path)))
elif re.search("complex",output_type,re.IGNORECASE):
return corrected_sparameters
elif re.search("matrix_list",output_type,re.IGNORECASE):
return two_port_complex_to_matrix_form(corrected_sparameters)
elif re.search("matrix",output_type,re.IGNORECASE):
raise
except:
print("Could not correct sparameters")
raise
def average_one_port_sparameters(table_list,**options):
    """Returns the average of the Sparameters in table_list as a list of data rows. The output will have all
    the unique frequency values contained in all of the tables. Tables must be in Real-Imaginary format or
    magnitude-angle format; do not try to average dB-angle format. """
#This will work on any table that the data is stored in data, need to add a sparameter version
defaults={"frequency_selector":0,"frequency_column_name":"Frequency"}
average_options={}
for key,value in defaults.items():
average_options[key]=value
for key,value in options.items():
average_options[key]=value
frequency_list=[]
average_data=[]
for table in table_list:
frequency_list=frequency_list+table.get_column("Frequency")
unique_frequency_list=sorted(list(set(frequency_list)))
for frequency in unique_frequency_list:
new_row=[]
for table in table_list:
data_list=[x for x in table.data if x[average_options["frequency_selector"]]==frequency]
table_average=np.mean(np.array(data_list),axis=0)
new_row.append(table_average)
#print new_row
average_data.append(np.mean(new_row,axis=0).tolist())
return average_data
def two_port_comparison_plot_with_residuals(two_port_raw,mean_frame,difference_frame):
"""Creates a comparison plot given a TwoPortRawModel object and a pandas.DataFrame mean frame"""
fig, axes = plt.subplots(nrows=3, ncols=2, sharex='col',figsize=(8,6),dpi=80)
measurement_date=two_port_raw.metadata["Measurement_Date"]
ax0,ax1,ax2,ax3,ax4,ax5 = axes.flat
compare_axes=[ax0,ax1,ax2,ax3,ax4,ax5]
diff_axes=[]
for ax in compare_axes:
diff_axes.append(ax.twinx())
#diff_axes=[diff_ax0,diff_ax1,diff_ax2,diff_ax3,diff_ax4,diff_ax5]
column_names=['Frequency','magS11','argS11','magS21','argS21','magS22','argS22']
for index,ax in enumerate(diff_axes):
ax.plot(difference_frame['Frequency'].tolist(),difference_frame[column_names[index+1]].tolist(),'r-x')
ax.set_ylabel('Difference',color='red')
if re.search('mag',column_names[index+1]):
ax.set_ylim(-.02,.02)
#ax.legend_.remove()
for index, ax in enumerate(compare_axes):
ax.plot(two_port_raw.get_column('Frequency'),two_port_raw.get_column(column_names[index+1]),
'k-o',label=measurement_date)
ax.plot(mean_frame['Frequency'].tolist(),mean_frame[column_names[index+1]].tolist(),'gs',label='Mean')
ax.set_title(column_names[index+1])
ax.legend(loc=1,fontsize='8')
#ax.xaxis.set_visible(False)
if re.search('arg',column_names[index+1]):
ax.set_ylabel('Phase(Degrees)',color='green')
elif re.search('mag',column_names[index+1]):
ax.set_ylabel(r'|${\Gamma} $|',color='green')
#ax.sharex(diff_axes[index])
ax4.set_xlabel('Frequency(GHz)',color='k')
ax5.set_xlabel('Frequency(GHz)',color='k')
fig.subplots_adjust(hspace=0)
fig.suptitle(two_port_raw.metadata["Device_Id"]+"\n",fontsize=18,fontweight='bold')
plt.tight_layout()
plt.show()
return fig
def two_port_difference_frame(two_port_raw,mean_frame):
"""Creates a difference pandas.DataFrame given a two port raw file and a mean pandas.DataFrame"""
difference_list=[]
for row in two_port_raw.data[:]:
        mean_row=mean_frame[abs(mean_frame["Frequency"]-row[0])<.01].values
        try:
            mean_row=mean_row[0]
            difference_row=[row[i+2]-mean_row[i] for i in range(1,len(mean_row))]
            difference_row.insert(0,row[0])
            difference_list.append(difference_row)
        except (IndexError, TypeError):
            # no row in the mean frame within .01 of this frequency
            pass
column_names=['Frequency','magS11','argS11','magS21','argS21','magS22','argS22']
diff_data_frame=pandas.DataFrame(difference_list,columns=column_names)
return diff_data_frame
def two_port_mean_frame(device_id,system_id=None,history_data_frame=None):
"""Given a Device_Id and a pandas data frame of the history creates a mean data_frame"""
device_history=history_data_frame[history_data_frame["Device_Id"]==device_id]
if system_id is not None:
device_history=device_history[device_history["System_Id"]==system_id]
column_names=['Frequency','magS11','argS11','magS21','argS21','magS22','argS22']
unique_frequency_list=device_history["Frequency"].unique()
mean_array=[]
for index,freq in enumerate(unique_frequency_list):
row=[]
for column in column_names:
            values=device_history[device_history["Frequency"]==freq][column].values
            mean_value=np.mean(values)
row.append(mean_value)
mean_array.append(row)
mean_frame=pandas.DataFrame(mean_array,columns=column_names)
return mean_frame
def mean_from_history(history_frame,**options):
"""mean_from_history creates a mean_frame given a full history frame (pandas.DataFrame object),
by setting options it selects column names
to output and input values to filter on. Returns a pandas.DataFrame object with column names = column_names,
and filtered by any of the following: "Device_Id","System_Id","Measurement_Timestamp",
"Connector_Type_Measurement", "Measurement_Date" or "Measurement_Time" """
defaults={"Device_Id":None, "System_Id":None,"Measurement_Timestamp":None,
"Connector_Type_Measurement":None,
"Measurement_Date":None,"Measurement_Time":None,"Direction":None,
"column_names":['Frequency','magS11','argS11'],"outlier_removal":True}
mean_options={}
for key,value in defaults.items():
mean_options[key]=value
for key,value in options.items():
mean_options[key]=value
filters=["Device_Id","System_Id","Measurement_Timestamp","Connector_Type_Measurement",
"Measurement_Date","Measurement_Time","Direction"]
temp_frame=history_frame.copy()
for index,filter_type in enumerate(filters):
if mean_options[filter_type] is not None:
temp_frame=temp_frame[temp_frame[filter_type]==mean_options[filter_type]]
# temp_frame=temp_frame[temp_frame["Device_Id"]==mean_options["Device_Id"]]
# temp_frame=temp_frame[temp_frame["System_Id"]==mean_options["System_Id"]]
if mean_options["outlier_removal"]:
mean_s11=np.mean(temp_frame["magS11"])
std_s11=np.std(temp_frame["magS11"])
temp_frame=temp_frame[temp_frame["magS11"]<(mean_s11+3*std_s11)]
temp_frame = temp_frame[temp_frame["magS11"] > (mean_s11 - 3 * std_s11)]
unique_frequency_list=temp_frame["Frequency"].unique()
mean_array=[]
for index,freq in enumerate(unique_frequency_list):
row=[]
for column in mean_options["column_names"]:
            values=temp_frame[temp_frame["Frequency"]==freq][column].values
            mean_value=np.mean(values)
row.append(mean_value)
mean_array.append(row)
mean_frame=pandas.DataFrame(mean_array,columns=mean_options["column_names"])
return mean_frame
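# A hypothetical usage sketch (identifiers are illustrative): average the history
# of a single device measured on a single system, with the default 3-sigma
# outlier removal on magS11.
#
#     mean_frame = mean_from_history(history_frame, Device_Id="CTN208",
#                                    System_Id="System_1",
#                                    column_names=['Frequency', 'magS11', 'argS11'])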
def median_from_history(history_frame,**options):
"""median_from_history creates a median_frame given a full history frame (pandas.DataFrame object),
by setting options it selects column names
to output and input values to filter on. Returns a pandas.DataFrame object with column names = column_names,
and filtered by any of the following: "Device_Id","System_Id","Measurement_Timestamp",
"Connector_Type_Measurement", "Measurement_Date" or "Measurement_Time" """
defaults={"Device_Id":None, "System_Id":None,"Measurement_Timestamp":None,
"Connector_Type_Measurement":None,
"Measurement_Date":None,"Measurement_Time":None,"Direction":None,
"column_names":['Frequency','magS11','argS11'],"outlier_removal":True}
median_options={}
for key,value in defaults.items():
median_options[key]=value
for key,value in options.items():
median_options[key]=value
filters=["Device_Id","System_Id","Measurement_Timestamp","Connector_Type_Measurement",
"Measurement_Date","Measurement_Time","Direction"]
temp_frame=history_frame.copy()
for index,filter_type in enumerate(filters):
if median_options[filter_type] is not None:
temp_frame=temp_frame[temp_frame[filter_type]==median_options[filter_type]]
if median_options["outlier_removal"]:
mean_s11=np.mean(temp_frame["magS11"])
std_s11=np.std(temp_frame["magS11"])
temp_frame=temp_frame[temp_frame["magS11"]<(mean_s11+3*std_s11)]
temp_frame = temp_frame[temp_frame["magS11"] > (mean_s11 - 3 * std_s11)]
# temp_frame=temp_frame[temp_frame["Device_Id"]==median_options["Device_Id"]]
# temp_frame=temp_frame[temp_frame["System_Id"]==median_options["System_Id"]]
unique_frequency_list=temp_frame["Frequency"].unique()
median_array=[]
for index,freq in enumerate(unique_frequency_list):
row=[]
for column in median_options["column_names"]:
            values=temp_frame[temp_frame["Frequency"]==freq][column].values
            median_value=np.median(values)
row.append(median_value)
median_array.append(row)
median_frame=pandas.DataFrame(median_array,columns=median_options["column_names"])
return median_frame
def raw_difference_frame(raw_model,mean_frame,**options):
"""Creates a difference pandas.DataFrame given a raw NIST model and a mean pandas.DataFrame"""
defaults={"column_names":mean_frame.columns.tolist()}
difference_options={}
for key,value in defaults.items():
difference_options[key]=value
for key,value in options.items():
difference_options[key]=value
difference_list=[]
for row in raw_model.data[:]:
        mean_row=mean_frame[abs(mean_frame["Frequency"]-row[0])<.01].values
        try:
            mean_row=mean_row[0]
            difference_row=[row[i+2]-mean_row[i] for i in range(1,len(mean_row))]
            difference_row.insert(0,row[0])
            difference_list.append(difference_row)
        except (IndexError, TypeError):
            # no row in the mean frame within .01 of this frequency
            pass
difference_data_frame=pandas.DataFrame(difference_list,columns=difference_options["column_names"])
return difference_data_frame
def return_history_key(calrep_model):
"Returns a key for the history dictionary given a calrep model"
model=calrep_model.__class__.__name__
#print model
if re.search('Calrep|DUT',model):
if re.search('OnePortCalrep',model):
return '1-port calrep'
elif re.search('TwoPortCalrep',model):
return '2-port calrep'
elif re.search('PowerCalrep',model):
if calrep_model.options["column_names"]==POWER_3TERM_COLUMN_NAMES:
return 'power 3term calrep'
elif calrep_model.options["column_names"]==POWER_4TERM_COLUMN_NAMES:
return 'power 4term calrep'
elif re.search('OnePortDUT',model):
return 'power 3term calrep'
else:
raise TypeError("Must be a calrep model, such as OnePortCalrepModel, etc. ")
def raw_comparison_plot_with_residuals(raw_nist,mean_frame,difference_frame,**options):
"""Creates a comparison plot given a RawModel object and a pandas.DataFrame mean frame and difference frame"""
defaults={"display_mean":True,
"display_difference":True,
"display_raw":True,
"display_legend":True,
"save_plot":False,
"directory":None,
"specific_descriptor":raw_nist.metadata["Device_Id"]+"_Check_Standard",
"general_descriptor":"Plot","file_name":None}
comparison_plot_options={}
for key,value in defaults.items():
comparison_plot_options[key]=value
for key,value in options.items():
comparison_plot_options[key]=value
column_names=mean_frame.columns.tolist()
number_rows=int(len(column_names)/2)
fig, compare_axes = plt.subplots(nrows=number_rows, ncols=2, sharex='col',figsize=(8,6),dpi=80)
measurement_date=raw_nist.metadata["Measurement_Date"]
diff_axes=[]
for ax in compare_axes.flat:
diff_axes.append(ax.twinx())
#diff_axes=[diff_ax0,diff_ax1,diff_ax2,diff_ax3,diff_ax4,diff_ax5]
if comparison_plot_options["display_difference"]:
for index,ax in enumerate(diff_axes):
ax.plot(difference_frame['Frequency'].tolist(),difference_frame[column_names[index+1]].tolist(),'r-x')
ax.set_ylabel('Difference',color='red')
if re.search('mag',column_names[index+1]):
ax.set_ylim(-.02,.02)
#ax.legend_.remove()
for index, ax in enumerate(compare_axes.flat):
if comparison_plot_options["display_raw"]:
ax.plot(raw_nist.get_column('Frequency'),raw_nist.get_column(column_names[index+1]),
'k-o',label=measurement_date)
if comparison_plot_options["display_mean"]:
ax.plot(mean_frame['Frequency'].tolist(),mean_frame[column_names[index+1]].tolist(),'gs',label='Mean')
ax.set_title(column_names[index+1])
if comparison_plot_options["display_legend"]:
ax.legend(loc=1,fontsize='8')
#ax.xaxis.set_visible(False)
if re.search('arg',column_names[index+1]):
ax.set_ylabel('Phase(Degrees)',color='green')
elif re.search('mag',column_names[index+1]):
ax.set_ylabel(r'|${\Gamma} $|',color='green')
#ax.sharex(diff_axes[index])
compare_axes.flat[-2].set_xlabel('Frequency(GHz)',color='k')
compare_axes.flat[-1].set_xlabel('Frequency(GHz)',color='k')
fig.subplots_adjust(hspace=0)
fig.suptitle(raw_nist.metadata["Device_Id"]+"\n",fontsize=18,fontweight='bold')
plt.tight_layout()
if comparison_plot_options["file_name"] is None:
file_name=auto_name(specific_descriptor=comparison_plot_options["specific_descriptor"],
general_descriptor=comparison_plot_options["general_descriptor"],
directory=comparison_plot_options["directory"],extension='png',padding=3)
else:
file_name=comparison_plot_options["file_name"]
if comparison_plot_options["save_plot"]:
#print file_name
plt.savefig(os.path.join(comparison_plot_options["directory"],file_name))
else:
plt.show()
return fig
def calrep_history_plot(calrep_model,history_frame,**options):
"""Given a calrep_model and a history frame calrep_history_plot plots the file against any other in history
frame (pandas.DataFrame) with dates"""
defaults={"display_legend":True,
"save_plot":False,
"directory":None,
"specific_descriptor":calrep_model.metadata["Device_Id"]+"_Device_Measurement",
"general_descriptor":"Plot",
"file_name":None,
"min_num":0,
"max_num":None,
"error_style":"area"}
history_plot_options={}
for key,value in defaults.items():
history_plot_options[key]=value
for key,value in options.items():
history_plot_options[key]=value
# The way we plot depends on the models
model=calrep_model.__class__.__name__
# The new method relies on metadata and not the class
if re.search("DataTable",model,re.IGNORECASE):
try:
if calrep_model.metadata["Measurement_Type"] in ['1-port']:
model="OnePort"
elif calrep_model.metadata["Measurement_Type"] in ['2-port']:
model="TwoPort"
elif re.search('Dry Cal|Thermistor|power',calrep_model.metadata["Measurement_Type"]):
model="Power"
        except (KeyError, TypeError):
            # metadata may not define Measurement_Type
            pass
device_history=history_frame[history_frame["Device_Id"]==calrep_model.metadata["Device_Id"]]
unique_analysis_dates=sorted(device_history["Analysis_Date"].unique().tolist())
print(("{0} are {1}".format("unique_analysis_dates",unique_analysis_dates)))
if re.search('Power',model):
number_rows=2
column_names=['magS11','argS11','Efficiency','Calibration_Factor']
if calrep_model.options["column_names"]==POWER_3TERM_COLUMN_NAMES:
error_names=['uMgS11','uAgS11','uEe','uCe']
elif calrep_model.options["column_names"]==POWER_4TERM_COLUMN_NAMES:
error_names=['uMgS11','uAgS11','uEg','uCg']
table=calrep_model.joined_table
elif re.search('OnePort',model):
number_rows=1
column_names=['magS11','argS11']
error_names=['uMgS11','uAgS11']
table=calrep_model
elif re.search('TwoPort',model):
number_rows=3
column_names=['magS11','argS11','magS21','argS21','magS22','argS22']
error_names=['uMgS11','uAgS11','uMgS21','uAgS21','uMgS22','uAgS22']
table=calrep_model.joined_table
fig, compare_axes = plt.subplots(nrows=number_rows, ncols=2, sharex='col',figsize=(8,6),dpi=80)
for index, ax in enumerate(compare_axes.flat):
#ax.xaxis.set_visible(False)
if re.search('arg',column_names[index]):
ax.set_ylabel('Phase(Degrees)',color='green')
elif re.search('mag',column_names[index]):
ax.set_ylabel(r'|${\Gamma} $|',color='green')
ax.set_title(column_names[index])
# initial plot of
x=table.get_column('Frequency')
y=np.array(table.get_column(column_names[index]))
error=np.array(table.get_column(error_names[index]))
if re.search('bar',history_plot_options["error_style"],re.IGNORECASE):
ax.errorbar(x,y,yerr=error,fmt='k--')
for date_index,date in enumerate(unique_analysis_dates[history_plot_options["min_num"]:history_plot_options["max_num"]]):
number_lines=len(unique_analysis_dates[history_plot_options["min_num"]:history_plot_options["max_num"]])
date_device_history=device_history[device_history["Analysis_Date"]==date]
if not date_device_history.empty:
x_date=date_device_history['Frequency']
y_date=np.array(date_device_history[column_names[index]].tolist())
error_date=np.array(date_device_history[error_names[index]].tolist())
#print("{0} is {1}".format("date_device_history",date_device_history))
#print("{0} is {1}".format("y_date",y_date))
#print("{0} is {1}".format("date",date))
date_color=(1-float(date_index+1)/number_lines,0,float(date_index+1)/number_lines,.5)
ax.errorbar(x_date,y_date,
yerr=error_date,color=date_color,label=date)
elif re.search('area',history_plot_options["error_style"],re.IGNORECASE):
ax.plot(x,y,'k--')
ax.fill_between(x,y-error,y+error,edgecolor=(0,.0,.0,.25), facecolor=(.25,.25,.25,.1),
linewidth=1)
for date_index,date in enumerate(unique_analysis_dates[history_plot_options["min_num"]:history_plot_options["max_num"]]):
number_lines=float(len(unique_analysis_dates[history_plot_options["min_num"]:history_plot_options["max_num"]]))
#print("{0} is {1}".format("number_lines",number_lines))
#print("{0} is {1}".format("index",index))
#print("{0} is {1}".format("date_index",date_index))
date_color=(1-float(date_index+1)/number_lines,0,float(date_index+1)/number_lines,.5)
#print("{0} is {1}".format("date_color",date_color))
date_device_history=device_history[device_history["Analysis_Date"]==date]
x_date=date_device_history['Frequency']
y_date=np.array(date_device_history[column_names[index]].tolist())
error_date=np.array(date_device_history[error_names[index]].tolist())
ax.plot(x_date,y_date,
color=date_color,label=date)
#ax.sharex(diff_axes[index])
if history_plot_options["display_legend"]:
ax.legend(loc=1,fontsize='8')
compare_axes.flat[-2].set_xlabel('Frequency(GHz)',color='k')
compare_axes.flat[-1].set_xlabel('Frequency(GHz)',color='k')
fig.subplots_adjust(hspace=0)
fig.suptitle(calrep_model.metadata["Device_Id"]+"\n",fontsize=18,fontweight='bold')
plt.tight_layout()
# Dealing with the save option
if history_plot_options["file_name"] is None:
file_name=auto_name(specific_descriptor=history_plot_options["specific_descriptor"],
general_descriptor=history_plot_options["general_descriptor"],
directory=history_plot_options["directory"],extension='png',padding=3)
else:
file_name=history_plot_options["file_name"]
if history_plot_options["save_plot"]:
#print file_name
plt.savefig(os.path.join(history_plot_options["directory"],file_name))
else:
plt.show()
return fig
def compare_s2p_plots(list_S2PV1,**options):
"""compare_s2p_plot compares a list of s2p files plotting each on the same axis for all
8 possible components. The format of plots can be changed by passing options as key words in a
key word dictionary. """
defaults={"format":"MA",
"display_legend":True,
"save_plot":False,
"directory":None,
"specific_descriptor":"comparison_Plot",
"general_descriptor":"Plot",
"file_name":None,
"labels":None,
"title":None,
"grid":True}
comparison_plot_options={}
for key,value in defaults.items():
comparison_plot_options[key]=value
for key,value in options.items():
comparison_plot_options[key]=value
# create a set of 8 subplots
#plt.hold(True)
fig, compare_axes = plt.subplots(nrows=4, ncols=2, figsize=(8,6),dpi=80)
if comparison_plot_options["labels"] is None:
labels=[s2p.path for s2p in list_S2PV1]
else:
labels=comparison_plot_options["labels"]
for s2p_index,s2p in enumerate(list_S2PV1):
# start by changing the format of all the s2p
s2p.change_data_format(comparison_plot_options["format"])
column_names=s2p.column_names[1:]
for index, ax in enumerate(compare_axes.flat):
#ax.xaxis.set_visible(False)
if re.search('arg',column_names[index]):
ax.set_ylabel('Phase(Degrees)',color='black')
elif re.search('mag',column_names[index]):
ax.set_ylabel(r'|{0}|'.format(column_names[index].replace("mag","")),color='green')
if comparison_plot_options["grid"]:
ax.grid(True)
ax.set_title(column_names[index])
# initial plot of
x=s2p.get_column('Frequency')
y=np.array(s2p.get_column(column_names[index]))
ax.plot(x,y,label=labels[s2p_index])
if comparison_plot_options["display_legend"]:
if index == 1:
ax.legend(loc="center left", bbox_to_anchor=(1.05, .5),
shadow=True,
fancybox=True)
compare_axes.flat[-2].set_xlabel('Frequency(GHz)',color='k')
compare_axes.flat[-1].set_xlabel('Frequency(GHz)',color='k')
if comparison_plot_options["title"]:
fig.suptitle(comparison_plot_options["title"])
fig.subplots_adjust(hspace=0)
plt.tight_layout()
# Dealing with the save option
if comparison_plot_options["file_name"] is None:
file_name=auto_name(specific_descriptor=comparison_plot_options["specific_descriptor"],
general_descriptor=comparison_plot_options["general_descriptor"],
directory=comparison_plot_options["directory"]
,extension='png',padding=3)
else:
file_name=comparison_plot_options["file_name"]
if comparison_plot_options["save_plot"]:
#print file_name
plt.savefig(os.path.join(comparison_plot_options["directory"],file_name))
else:
plt.show()
return fig
def return_calrep_value_column_names(calrep_model):
"""Returns the column names for values in a calrep model. For example if the
calrep model is a 1-port, then it returns ["magS11","argS11"] """
measurement_type = calrep_model.metadata["Measurement_Type"]
if re.search('1|one', measurement_type, re.IGNORECASE):
column_names = ['magS11', 'argS11']
elif re.search('2|two', measurement_type, re.IGNORECASE):
if re.search('NR', measurement_type, re.IGNORECASE):
column_names = ['magS11', 'argS11', 'magS12', 'argS12', 'magS21', 'argS21', 'magS22', 'argS22']
else:
column_names = ['magS11', 'argS11', 'magS21', 'argS21', 'magS22', 'argS22']
else:
column_names = ['magS11', 'argS11', 'Efficiency']
return column_names
def return_calrep_error_column_names(calrep_model_value_columns,error_suffix='g'):
"""Returns the column names for errors in a calrep model. For example if the
calrep model value column names are ["magS11","argS11"], then it returns ["uMgS11","uAgS11"] """
error_columns = []
for column in calrep_model_value_columns[:]:
error_column = column.replace("mag", "uM" + error_suffix)
error_column = error_column.replace("arg", "uA" + error_suffix)
error_column = error_column.replace("Efficiency", "uE" + error_suffix)
error_columns.append(error_column)
return error_columns
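# Illustrative examples of the mapping performed above:
#     return_calrep_error_column_names(['magS11', 'argS11'])         -> ['uMgS11', 'uAgS11']
#     return_calrep_error_column_names(['magS21'], error_suffix='b') -> ['uMbS21']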
def plot_frequency_model(frequency_model, **options):
    """Plots any table with Frequency as its x-axis and the remaining column_names as y-axes in a
    series of subplots"""
defaults = {"display_legend": False,
"save_plot": False,
"directory": None,
"specific_descriptor": "Frequency_Model",
"general_descriptor": "Plot",
"file_name": None,
"plots_per_column": 2,
"plot_format": 'b-o',
"share_x": False,
"subplots_title": True,
"plot_title": None,
"plot_size": (8, 6),
"dpi": 80}
plot_options = {}
for key, value in defaults.items():
plot_options[key] = value
for key, value in options.items():
plot_options[key] = value
if type(frequency_model) in [pandas.DataFrame]:
frequency_model = DataFrame_to_AsciiDataTable(frequency_model)
x_data = np.array(frequency_model["Frequency"])
y_data_columns = frequency_model.column_names[:]
y_data_columns.remove("Frequency")
number_plots = len(y_data_columns)
number_columns = plot_options["plots_per_column"]
number_rows = int(round(float(number_plots) / float(number_columns)))
figure, axes = plt.subplots(ncols=number_columns, nrows=number_rows, sharex=plot_options["share_x"],
figsize=plot_options["plot_size"], dpi=plot_options["dpi"])
for plot_index, ax in enumerate(axes.flat):
if plot_index < number_plots:
y_data = np.array(frequency_model[y_data_columns[plot_index]])
ax.plot(x_data, y_data, plot_options["plot_format"], label=y_data_columns[plot_index])
if plot_options["display_legend"]:
ax.legend()
if plot_options["subplots_title"]:
ax.set_title(y_data_columns[plot_index])
else:
pass
if plot_options["plot_title"]:
plt.suptitle(plot_options["plot_title"])
plt.tight_layout()
# Dealing with the save option
if plot_options["file_name"] is None:
file_name = auto_name(specific_descriptor=plot_options["specific_descriptor"],
general_descriptor=plot_options["general_descriptor"],
directory=plot_options["directory"], extension='png', padding=3)
else:
file_name = plot_options["file_name"]
if plot_options["save_plot"]:
# print file_name
plt.savefig(os.path.join(plot_options["directory"], file_name))
else:
plt.show()
return figure
def plot_frequency_model_histogram(frequency_model, **options):
    """Plots a histogram for each non-Frequency column of any table with a Frequency column, in a
    series of subplots"""
defaults = {"display_legend": False,
"save_plot": False,
"directory": None,
"specific_descriptor": "Frequency_Model",
"general_descriptor": "Plot",
"file_name": None,
"plots_per_column": 2,
"plot_format": 'b-o',
"share_x": False,
"subplots_title": True,
"plot_title": None,
"plot_size": (8, 6),
"dpi": 80,
"non_plotable_text": "Not Plotable"}
plot_options = {}
for key, value in defaults.items():
plot_options[key] = value
for key, value in options.items():
plot_options[key] = value
if type(frequency_model) in [pandas.DataFrame]:
frequency_model = DataFrame_to_AsciiDataTable(frequency_model)
x_data = np.array(frequency_model["Frequency"])
y_data_columns = frequency_model.column_names[:]
y_data_columns.remove("Frequency")
number_plots = len(y_data_columns)
number_columns = plot_options["plots_per_column"]
number_rows = int(round(float(number_plots) / float(number_columns)))
figure, axes = plt.subplots(ncols=number_columns, nrows=number_rows, sharex=plot_options["share_x"],
figsize=plot_options["plot_size"], dpi=plot_options["dpi"])
for plot_index, ax in enumerate(axes.flat):
if plot_index < number_plots:
try:
y_data = | np.array(frequency_model[y_data_columns[plot_index]]) | numpy.array |
# %% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
from mgcpy.independence_tests.mdmr import MDMR
from mgcpy.independence_tests.kendall_spearman import KendallSpearman
from mgcpy.independence_tests.hhg import HHG
from mgcpy.independence_tests.rv_corr import RVCorr
from mgcpy.independence_tests.mgc import MGC
from mgcpy.benchmarks.simulations import *
from mgcpy.benchmarks.power import power, power_given_data
from mgcpy.independence_tests.dcorr import DCorr
import sys
import os
try:
os.chdir(os.path.join(os.getcwd(), 'demos'))
print(os.getcwd())
except:
pass
# %% [markdown]
# This notebook contains the reproduction of the power curves in figure 2 of the paper "Discovering and Deciphering Relationships Across Disparate Data Modalities". Plots here include:
# * the difference between the power computed using ``mgcpy`` and the paper results, using data pre-generated with the matlab code in the github repository ``mgc-paper``
# * partially completed power curves generated using ``mgcpy``
# %% [markdown]
# ### How to add new power curves?
# * Read the comments in the following functions and make edits accordingly
# * For power curves with increasing dimensions
# * fill_params_dict_list_dimensions()
# * For power curves with increasing sample size
# * fill_params_dict_list_sample_size()
# * plot_all_curves for plotting
# * Run the block of code as indicated in markdown (a minimal driver sketch is also included after the parameter-filling functions below)
# * Note that you don't need to run all the previous tests e.g. mgc, dcorr, etc. They are already stored.
# %%
import numpy as np
from scipy.spatial.distance import pdist, squareform
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
from numpy import genfromtxt
import pandas as pd
from joblib import Parallel, delayed  # sklearn.externals.joblib was removed in newer scikit-learn
import multiprocessing as mp
import scipy.io
#from tqdm import tqdm_notebook as tqdm
import h5py
import math
from scipy.ndimage.filters import gaussian_filter1d
# %%
sns.color_palette('Set1')
sns.set(color_codes=True, style='white', context='talk', font_scale=2)
# %%
module_path = '/Users/spanda/Essential/580.638 - NeuroData II/mgcpy'
if module_path not in sys.path:
sys.path.append(module_path)
# %%
def find_dim(sim_name):
dim = 0
if sim_name in ['joint_normal', 'sine_4pi', 'sine_16pi', 'multi_noise']:
dim = 10
elif sim_name in ['step', 'spiral', 'circle', 'ellipse', 'quadratic', 'w_shape', 'two_parabolas', 'fourth_root']:
dim = 20
elif sim_name in ['multi_indept', 'bernoulli', 'log']:
dim = 100
elif sim_name in ['linear', 'exponential', 'cubic']:
dim = 1000
else:
dim = 40
return dim
# %%
simulations = {'joint_normal': (joint_sim, 4), 'sine_4pi': (sin_sim, 12), 'sine_16pi': (sin_sim, 13), 'multi_noise': (multi_noise_sim, 19),
'step': (step_sim, 5), 'spiral': (spiral_sim, 8), 'circle': (circle_sim, 16), 'ellipse': (circle_sim, 17), 'diamond': (square_sim, 18),
'log': (log_sim, 10), 'quadratic': (quad_sim, 6), 'w_shape': (w_sim, 7), 'two_parabolas': (two_parab_sim, 15), 'fourth_root': (root_sim, 11),
'multi_indept': (multi_indep_sim, 20), 'bernoulli': (ubern_sim, 9), 'square': (square_sim, 14),
'linear': (linear_sim, 1), 'exponential': (exp_sim, 2), 'cubic': (cub_sim, 3)
}
# %%
def find_dim_range(dim):
if dim < 20:
lim = 10
else:
lim = 20
dim_range = np.arange(math.ceil(dim/lim), dim+1, math.ceil(dim/lim))
if math.ceil(dim/lim) != 1:
dim_range = np.insert(dim_range, 0, 1)
lim = dim_range.shape[0]
return (lim, dim_range)
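# Quick check of the helper above (the values follow directly from its definition):
# find_dim_range(40) -> (21, array([ 1,  2,  4, ..., 38, 40]))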
# %% [markdown]
# ## Parallel code
# %%
def power_vs_dimension_parallel(params_dict):
test = params_dict['independence_test']
sim = params_dict['simulation_type']
print(sim, test.get_name())
dim = params_dict['dim']
lim, dim_range = find_dim_range(dim)
estimated_power = np.zeros(lim)
for i in range(lim):
estimated_power[i] = power(test, sim, num_samples=100, num_dimensions=dim_range[i])
np.savetxt('../mgcpy/benchmarks/python_power_curves_dimensions/fast_mgc/{}_{}_dimensions.csv'.format(sim, test.get_name()), estimated_power, delimiter=',')
print('{} {} finished'.format(sim, test.get_name()))
return (params_dict, estimated_power)
# %%
def fill_params_dict_list_dimensions():
mcorr = DCorr(which_test='unbiased')
dcorr = DCorr(which_test='biased')
mantel = DCorr(which_test='mantel')
mgc = MGC()
rv_corr = RVCorr(which_test='rv')
hhg = HHG()
cca = RVCorr(which_test='cca')
mdmr = MDMR()
# initialize additional test
# add the test that you wish to run in the `independence_tests` list
independence_tests = [] # [mgc, mcorr, dcorr, mantel, rv_corr, cca]
params_dict_list = []
for sim_name, sim_func in simulations.items():
for test in independence_tests:
params_dict = {'independence_test': test, 'simulation_type': sim_func[1], 'dim': find_dim(sim_name)}
params_dict_list.append(params_dict)
return params_dict_list
# %%
def power_vs_sample_size_parallel(params_dict):
sample_sizes = [i for i in range(5, 101, 5)]
estimated_power = np.zeros(len(sample_sizes))
test = params_dict['independence_test']
sim = params_dict['simulation_type']
print(sim, test.get_name())
for i in range(len(sample_sizes)):
estimated_power[i] = power_given_data(test, sim, data_type='sample_size', num_samples=sample_sizes[i], num_dimensions=1)
np.savetxt('../mgcpy/benchmarks/python_power_curves_sample_size/fast_mgc/{}_{}_sample_size.csv'.format(sim, test.get_name()), estimated_power, delimiter=',')
print('{} {} finished'.format(sim, test.get_name()))
return (params_dict, estimated_power)
# %%
def fill_params_dict_list_sample_sizes():
mcorr = DCorr(which_test='unbiased')
dcorr = DCorr(which_test='biased')
mantel = DCorr(which_test='mantel')
mgc = MGC()
hhg = HHG()
pearson = RVCorr(which_test='pearson')
independence_tests = [] # [mgc, mcorr, dcorr, mantel, pearson]
params_dict_list = []
for sim_name, sim_func in simulations.items():
for test in independence_tests:
params_dict = {'independence_test': test, 'simulation_type': sim_func[1]}
params_dict_list.append(params_dict)
return params_dict_list
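# %% [markdown]
# A minimal driver sketch (hypothetical; it assumes the `independence_tests` list in
# `fill_params_dict_list_dimensions` above has been populated). It is left commented out
# because the stored results already exist; the sample-size curves work the same way with
# the corresponding pair of functions.
# %%
# params_dict_list = fill_params_dict_list_dimensions()
# _ = Parallel(n_jobs=mp.cpu_count() - 1)(
#     delayed(power_vs_dimension_parallel)(params) for params in params_dict_list)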
# %%
def fast_power_vs_dimension_parallel(params_dict):
test = params_dict['independence_test']
sim = params_dict['simulation_type']
print(sim, test.get_name())
dim = params_dict['dim']
lim, dim_range = find_dim_range(dim)
    estimated_power = np.zeros(lim)
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python import (
Bool,
Int8, Int16, Int32, Int64,
UInt8, UInt16, UInt32, UInt64,
Float32, Float64,
NoneType, TupleOf, ListOf, OneOf, Tuple, NamedTuple, Dict,
ConstDict, Alternative, serialize, deserialize, Value, Class, Member,
TypeFilter, UndefinedBehaviorException, Function
)
from typed_python.test_util import currentMemUsageMb
import typed_python._types as _types
import psutil
import unittest
import traceback
import time
import numpy
import os
import sys
def typeFor(t):
assert not isinstance(t, list), t
return type(t)
def typeForSeveral(t):
ts = set(typeFor(a) for a in t)
if len(ts) == 1:
return list(ts)[0]
return OneOf(*ts)
def makeTupleOf(*args):
if not args:
return TupleOf(int)()
return TupleOf(typeForSeveral(args))(args)
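# e.g. makeTupleOf(1, 2.0) infers OneOf(int, float) as the element type and returns
# TupleOf(OneOf(int, float))((1, 2.0)); with no arguments it falls back to TupleOf(int)().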
def makeNamedTuple(**kwargs):
if not kwargs:
return NamedTuple()()
return NamedTuple(**{k:typeFor(v) for k,v in kwargs.items()})(**kwargs)
def makeTuple(*args):
if not args:
return Tuple()()
return Tuple(*[typeFor(v) for v in args])(args)
def makeDict(d):
if not d:
return ConstDict(int,int)()
return ConstDict(typeForSeveral(d.keys()), typeForSeveral(d.values()))(d)
def makeAlternative(severalDicts):
types = list(
set(
tuple(
(k,typeFor(v)) for k,v in ntDict.items()
)
for ntDict in severalDicts
)
)
alt = Alternative("Alt", **{
"a_%s" % i: dict(types[i]) for i in range(len(types))
})
res = []
for thing in severalDicts:
did = False
for i in range(len(types)):
try:
res.append(getattr(alt,"a_%s" % i)(**thing))
did = True
except Exception:
pass
if did:
break
assert len(res) == len(severalDicts)
return res
def choice(x):
#numpy.random.choice([1,(1,2)]) blows up because it looks 'multidimensional'
#so we have to pick from a list of indices
if not isinstance(x,list):
x = list(x)
return x[numpy.random.choice(list(range(len(x))))]
class RandomValueProducer:
def __init__(self):
self.levels = {0: [b'1', b'', '2', '', 0, 1, 0.0, 1.0, None, False, True, "a ", "a string", "b string", "b str"]}
def addEvenly(self, levels, count):
for level in range(1, levels+1):
self.addValues(level, count)
def all(self):
res = []
for valueList in self.levels.values():
res.extend(valueList)
return res
def addValues(self, level, count, sublevels = None):
assert level > 0
if sublevels is None:
sublevels = list(range(level))
sublevels = [x for x in sublevels if x in self.levels]
assert sublevels
def picker():
whichLevel = choice(sublevels)
try:
return choice(self.levels[whichLevel])
except Exception:
print(self.levels[whichLevel])
raise
for _ in range(count):
val = self.randomValue(picker)
if not isinstance(val,list):
val = [val]
self.levels.setdefault(level, []).extend(val)
def randomValue(self, picker):
def randomTuple():
return makeTuple(*[picker() for i in range(choice([0,1,2,3,4]))])
def randomNamedTupleDict():
return {"x_%s" % i: picker() for i in range(choice([0,1,2,3,4]))}
def randomNamedTuple():
return makeNamedTuple(**randomNamedTupleDict())
def randomDict():
return makeDict({picker():picker() for i in range(choice([0,1,2,3,4]))})
def randomTupleOf():
return makeTupleOf(*[picker() for i in range(choice([0,1,2,3,4]))])
def randomAlternative():
return makeAlternative([randomNamedTupleDict() for i in range(choice([1,2,3,4]))])
return choice([randomTuple,randomNamedTuple,randomDict,randomTupleOf,randomAlternative,picker])()
def pickRandomly(self):
return choice(self.levels[choice(list(self.levels))])
class NativeTypesTests(unittest.TestCase):
def check_expected_performance(self, elapsed, expected=1.0):
if os.environ.get('TRAVIS_CI', None) is not None:
expected = 2 * expected
self.assertTrue(
elapsed < expected,
"Slow Performance: expected to take {expected} sec, but took {elapsed}"
.format(expected=expected, elapsed=elapsed)
)
def test_objects_are_singletons(self):
self.assertTrue(Int8 is Int8)
self.assertTrue(NoneType is NoneType)
def test_object_binary_compatibility(self):
ibc = _types.isBinaryCompatible
self.assertTrue(ibc(NoneType, NoneType))
self.assertTrue(ibc(Int8, Int8))
NT = NamedTuple(a=int,b=int)
class X(NamedTuple(a=int,b=int)):
pass
class Y(NamedTuple(a=int,b=int)):
pass
self.assertTrue(ibc(X, X))
self.assertTrue(ibc(X, Y))
self.assertTrue(ibc(X, NT))
self.assertTrue(ibc(Y, NT))
self.assertTrue(ibc(NT, Y))
self.assertFalse(ibc(OneOf(int, float), OneOf(float, int)))
self.assertTrue(ibc(OneOf(int, X), OneOf(int, Y)))
self.assertIsInstance(OneOf(None, X)(Y()), X)
self.assertIsInstance(NamedTuple(x=OneOf(None, X))(x=Y()).x, X)
def test_binary_compatibility_incompatible_alternatives(self):
ibc = _types.isBinaryCompatible
A1 = Alternative("A1", X={'a': int}, Y={'b': float})
A2 = Alternative("A2", X={'a': int}, Y={'b': str})
self.assertTrue(ibc(A1, A1.X))
self.assertTrue(ibc(A1, A1.Y))
self.assertTrue(ibc(A1.Y, A1.Y))
self.assertTrue(ibc(A1.Y, A1))
self.assertTrue(ibc(A1.X, A1))
self.assertFalse(ibc(A1.X, A1.Y))
self.assertFalse(ibc(A1, A2))
self.assertFalse(ibc(A1.X, A2.X))
self.assertFalse(ibc(A1.Y, A2.Y))
def test_binary_compatibility_compatible_alternatives(self):
ibc = _types.isBinaryCompatible
A1 = Alternative("A1", X={'a': int}, Y={'b': float})
A2 = Alternative("A2", X={'a': int}, Y={'b': float})
self.assertTrue(ibc(A1.X, A2.X))
self.assertTrue(ibc(A1.Y, A2.Y))
self.assertFalse(ibc(A1.X, A2.Y))
self.assertFalse(ibc(A1.Y, A2.X))
def test_object_bytecounts(self):
self.assertEqual(_types.bytecount(NoneType), 0)
self.assertEqual(_types.bytecount(Int8), 1)
self.assertEqual(_types.bytecount(Int64), 8)
def test_type_stringification(self):
for t in ['Int8', 'NoneType']:
self.assertEqual(str(getattr(_types,t)()), "<class '%s'>" % t)
def test_tuple_of(self):
tupleOfInt = TupleOf(int)
i = tupleOfInt(())
i = tupleOfInt((1,2,3))
self.assertEqual(len(i), 3)
self.assertEqual(tuple(i), (1,2,3))
for x in range(10):
self.assertEqual(
tuple(tupleOfInt(tuple(range(x)))),
tuple(range(x))
)
with self.assertRaisesRegex(AttributeError, "do not accept attributes"):
tupleOfInt((1,2,3)).x = 2
def test_one_of_alternative(self):
X = Alternative("X", V={'a': int})
O = OneOf(None, X)
self.assertEqual(O(X.V(a=10)), X.V(a=10))
def test_one_of_py_subclass(self):
class X(NamedTuple(x=int)):
def f(self):
return self.x
O = OneOf(None, X)
self.assertEqual(NamedTuple(x=int)(x=10).x, 10)
self.assertEqual(X(x=10).f(), 10)
self.assertEqual(O(X(x=10)).f(), 10)
def test_tuple_of_tuple_of(self):
tupleOfInt = TupleOf(int)
tupleOfTupleOfInt = TupleOf(tupleOfInt)
pyVersion = (1,2,3),(1,2,3,4)
nativeVersion = tupleOfTupleOfInt(pyVersion)
self.assertEqual(len(nativeVersion), 2)
self.assertEqual(len(nativeVersion[0]), 3)
self.assertEqual(tuple(tuple(x) for x in nativeVersion), pyVersion)
bigTup = tupleOfInt(list(range(1000)))
t0 = time.time()
t = (bigTup,bigTup,bigTup,bigTup,bigTup)
for i in range(1000000):
tupleOfTupleOfInt(t)
elapsed = time.time() - t0
print("Took ", elapsed, " to do 1mm")
self.check_expected_performance(elapsed)
def test_default_initializer_oneof(self):
x = OneOf(None, int)
self.assertTrue(x() is None, repr(x()))
def test_tuple_of_various_things(self):
for thing, typeOfThing in [("hi", str), (b"somebytes", bytes),
(1.0, float), (2, int),
(None, type(None))
]:
tupleType = TupleOf(typeOfThing)
t = tupleType((thing,))
self.assertTrue(type(t[0]) is typeOfThing)
self.assertEqual(t[0], thing)
def test_tuple_assign_fails(self):
with self.assertRaisesRegex(TypeError, "does not support item assignment"):
(1,2,3)[10] = 20
with self.assertRaisesRegex(TypeError, "does not support item assignment"):
TupleOf(int)((1,2,3))[10] = 20
def test_list_of(self):
L = ListOf(int)
self.assertEqual(L.__qualname__, "ListOf(Int64)")
l = L([1,2,3,4])
self.assertEqual(l[0], 1)
self.assertEqual(l[-1], 4)
l[0] = 10
self.assertEqual(l[0], 10)
l[-1] = 11
self.assertEqual(l[3], 11)
with self.assertRaisesRegex(IndexError, "index out of range"):
l[100] = 20
l2 = L((10,2,3,11))
self.assertEqual(l,l2)
self.assertNotEqual(l,(10,2,3,11))
self.assertEqual(l,[10,2,3,11])
self.assertEqual(str(l),str([10,2,3,11]))
l3 = l + l2
self.assertEqual(l3, [10,2,3,11,10,2,3,11])
l3.append(23)
self.assertEqual(l3, [10,2,3,11,10,2,3,11, 23])
def test_list_resize(self):
l = ListOf(TupleOf(int))()
l.resize(10)
self.assertEqual(l.reserved(), 10)
self.assertEqual(len(l), 10)
emptyTup = TupleOf(int)()
aTup = TupleOf(int)((1,2,3))
self.assertEqual(list(l), [emptyTup] * 10)
l.resize(20, aTup)
self.assertEqual(list(l), [emptyTup] * 10 + [aTup] * 10)
self.assertEqual(_types.refcount(aTup), 11)
self.assertEqual(l.pop(15), aTup)
self.assertEqual(l.pop(5), emptyTup)
self.assertEqual(_types.refcount(aTup), 10)
l.resize(15)
with self.assertRaises(IndexError):
l.pop(100)
self.assertEqual(_types.refcount(aTup), 7) #6 in the list because we popped at '5'
l.pop()
self.assertEqual(_types.refcount(aTup), 6)
#this pops one of the empty tuples
l.pop(-10)
self.assertEqual(_types.refcount(aTup), 6)
l.clear()
self.assertEqual(len(l), 0)
def test_one_of(self):
o = OneOf(None, str)
self.assertEqual(o("hi"), "hi")
self.assertTrue(o(None) is None)
o = OneOf(None, "hi", 1.5, 1, True, b"hi2")
self.assertTrue(o(None) is None)
self.assertTrue(o("hi") == "hi")
self.assertTrue(o(b"hi2") == b"hi2")
self.assertTrue(o(1.5) == 1.5)
self.assertTrue(o(1) is 1)
self.assertIs(o(True), True)
with self.assertRaises(TypeError):
o("hi2")
with self.assertRaises(TypeError):
o(b"hi")
with self.assertRaises(TypeError):
o(3)
with self.assertRaises(TypeError):
o(False)
def test_ordering(self):
o = OneOf(None, "hi", 1.5, 1, True, b"hi2")
self.assertIs(o(True), True)
def test_one_of_flattening(self):
self.assertEqual(OneOf(OneOf(None, 1.0), OneOf(2.0, 3.0)), OneOf(None, 1.0, 2.0, 3.0))
def test_one_of_order_matters(self):
self.assertNotEqual(OneOf(1.0, 2.0), OneOf(2.0, 1.0))
def test_type_filter(self):
EvenInt = TypeFilter(int, lambda i: i % 2 == 0)
self.assertTrue(isinstance(2, EvenInt))
self.assertFalse(isinstance(1, EvenInt))
self.assertFalse(isinstance(2.0, EvenInt))
EvenIntegers = TupleOf(EvenInt)
e = EvenIntegers(())
e2 = e + (2,4,0)
with self.assertRaises(TypeError):
EvenIntegers((1,))
with self.assertRaises(TypeError):
e2 + (1,)
def test_tuple_of_one_of_fixed_size(self):
t = TupleOf(OneOf(0,1,2,3,4))
ints = tuple([x % 5 for x in range(1000000)])
typedInts = t(ints)
self.assertEqual(len(serialize(t, typedInts)), len(ints) + 8) #4 bytes for size of list, 4 bytes for frame size
self.assertEqual(tuple(typedInts), ints)
def test_tuple_of_one_of_multi(self):
t = TupleOf(OneOf(int, bool))
someThings = tuple([100 + x % 5 if x % 17 != 0 else bool(x%19) for x in range(1000000)])
typedThings = t(someThings)
self.assertEqual(
len(serialize(t, typedThings)),
sum(2 if isinstance(t,bool) else 9 for t in someThings) + 8
)
self.assertEqual(tuple(typedThings), someThings)
def test_compound_oneof(self):
producer = RandomValueProducer()
producer.addEvenly(1000, 2)
for _ in range(1000):
vals = (producer.pickRandomly(), producer.pickRandomly(), producer.pickRandomly())
a = OneOf(vals[0], vals[1], type(vals[2]))
for v in vals:
self.assertEqual(a(v), v, (a(v),v))
tup = TupleOf(a)
tupInst = tup(vals)
for i in range(len(vals)):
self.assertEqual(tupInst[i], vals[i], vals)
def test_one_of_conversion_failure(self):
o = OneOf(None, str)
with self.assertRaises(TypeError):
o(b"bytes")
def test_one_of_in_tuple(self):
t = Tuple(OneOf(None, str), str)
self.assertEqual(t(("hi","hi2"))[0], "hi")
self.assertEqual(t(("hi","hi2"))[1], "hi2")
self.assertEqual(t((None,"hi2"))[1], "hi2")
self.assertEqual(t((None,"hi2"))[0], None)
with self.assertRaises(TypeError):
t((None,None))
with self.assertRaises(IndexError):
t((None,"hi2"))[2]
def test_one_of_composite(self):
t = OneOf(TupleOf(str), TupleOf(float))
self.assertIsInstance(t((1.0,2.0)), TupleOf(float))
self.assertIsInstance(t(("1.0","2.0")), TupleOf(str))
with self.assertRaises(TypeError):
t((1.0,"2.0"))
def test_named_tuple(self):
t = NamedTuple(a=int, b=int)
with self.assertRaisesRegex(AttributeError, "object has no attribute"):
t().asdf
with self.assertRaisesRegex(AttributeError, "immutable"):
t().a = 1
self.assertEqual(t()[0], 0)
self.assertEqual(t().a, 0)
self.assertEqual(t()[1], 0)
self.assertEqual(t(a=1,b=2).a, 1)
self.assertEqual(t(a=1,b=2).b, 2)
def test_named_tuple_construction(self):
t = NamedTuple(a=int, b=int)
self.assertEqual(t(a=10).a, 10)
self.assertEqual(t(a=10).b, 0)
self.assertEqual(t(a=10,b=2).a, 10)
self.assertEqual(t(a=10,b=2).b, 2)
self.assertEqual(t({'a': 10,'b':2}).a, 10)
self.assertEqual(t({'a': 10,'b':2}).b, 2)
self.assertEqual(t({'b':2}).a, 0)
self.assertEqual(t({'b':2}).b, 2)
with self.assertRaises(TypeError):
t({'c':10})
with self.assertRaises(TypeError):
t(c=10)
def test_named_tuple_str(self):
t = NamedTuple(a=str, b=str)
self.assertEqual(t(a='1',b='2').a, '1')
self.assertEqual(t(a='1',b='2').b, '2')
self.assertEqual(t(b='2').a, '')
self.assertEqual(t(b='2').b, '2')
self.assertEqual(t().a, '')
self.assertEqual(t().b, '')
def test_tuple_of_string_perf(self):
t = NamedTuple(a=str, b=str)
t0 = time.time()
for i in range(1000000):
t(a="a", b="b").a
elapsed = time.time() - t0
print("Took ", elapsed, " to do 1mm")
self.check_expected_performance(elapsed)
def test_comparisons_in_one_of(self):
t = OneOf(None, float)
def map(x):
if x is None:
return -1000000.0
else:
return x
lt = lambda a,b: map(a) < map(b)
le = lambda a,b: map(a) <= map(b)
eq = lambda a,b: map(a) == map(b)
ne = lambda a,b: map(a) != map(b)
gt = lambda a,b: map(a) > map(b)
ge = lambda a,b: map(a) >= map(b)
funcs = [lt,le,eq,ne,gt,ge]
ts = [None,1.0,2.0,3.0]
for f in funcs:
for t1 in ts:
for t2 in ts:
self.assertTrue(f(t1,t2) is f(t(t1),t(t2)))
def test_comparisons_equivalence(self):
t = TupleOf(OneOf(None, str, bytes, float, int, bool, TupleOf(int)),)
def lt(a,b): return a < b
def le(a,b): return a <= b
def eq(a,b): return a == b
def ne(a,b): return a != b
def gt(a,b): return a > b
def ge(a,b): return a >= b
funcs = [lt,le,eq,ne,gt,ge]
tgroups = [
[1.0,2.0,3.0],
[1,2,3],
[True,False],
["a","b","ab","bb","ba","aaaaaaa","","asdf"],
["1","2","3","12","13","23","24","123123", "0", ""],
[b"a",b"b",b"ab",b"bb",b"ba",b"aaaaaaa",b"",b"asdf"],
[(1,2),(1,2,3),(),(1,1),(1,)]
]
for ts in tgroups:
for f in funcs:
for t1 in ts:
for t2 in ts:
self.assertTrue(f(t1,t2) is f(t((t1,)),t((t2,))),
(f, t1,t2, f(t1,t2), f(t((t1,)),t((t2,))))
)
def test_const_dict(self):
t = ConstDict(str,str)
self.assertEqual(len(t()), 0)
self.assertEqual(len(t({})), 0)
self.assertEqual(len(t({'a':'b'})), 1)
self.assertEqual(t({'a':'b'})['a'], 'b')
self.assertEqual(t({'a':'b','b':'c'})['b'], 'c')
self.assertTrue("a" in deserialize(t,serialize(t, t({'a':'b'}))))
self.assertTrue("a" in deserialize(t,serialize(t, t({'a':'b','b':'c'}))))
self.assertTrue("a" in deserialize(t,serialize(t, t({'a':'b','b':'c','c':'d'}))))
self.assertTrue("a" in deserialize(t,serialize(t, t({'a':'b','b':'c','c':'d','d':'e'}))))
self.assertTrue("c" in deserialize(t,serialize(t, t({'a':'b','b':'c','c':'d','def':'e'}))))
self.assertTrue("def" in deserialize(t,serialize(t, t({'a':'b','b':'c','c':'d','def':'e'}))))
def test_const_dict_get(self):
a = ConstDict(str,str)({'a':'b','c':'d'})
self.assertEqual(a.get('a'),'b')
self.assertEqual(a.get('asdf'),None)
self.assertEqual(a.get('asdf',20),20)
def test_const_dict_items_keys_and_values(self):
a = ConstDict(str,str)({'a':'b','c':'d'})
self.assertEqual(sorted(a.items()), [('a','b'),('c','d')])
self.assertEqual(sorted(a.keys()), ['a','c'])
self.assertEqual(sorted(a.values()), ['b','d'])
def test_empty_string(self):
a = ConstDict(str,str)({'a':''})
print(a['a'])
def test_dict_to_oneof(self):
t = ConstDict(str,OneOf("A","B","ABCDEF"))
a = t({'a':'A','b':'ABCDEF'})
self.assertEqual(a['a'], "A")
self.assertEqual(a['b'], "ABCDEF")
self.assertEqual(a, deserialize(t,serialize(t,a)))
def test_deserialize_primitive(self):
x = deserialize(str, serialize(str, "a"))
self.assertTrue(isinstance(x,str))
def test_dict_containment(self):
for _ in range(100):
producer = RandomValueProducer()
producer.addEvenly(20, 2)
values = producer.all()
for v in values:
if str(type(v))[:17] == "<class 'ConstDict":
v = deserialize(type(v), serialize(type(v), v))
for k in v:
self.assertTrue(k in v)
def test_named_tuple_from_dict(self):
N = NamedTuple(x=int, y=str,z=OneOf(None,"hihi"))
self.assertEqual(N().x, 0)
self.assertEqual(N().y, "")
self.assertEqual(N().z, None)
self.assertEqual(N({}).x, 0)
self.assertEqual(N({}).y, "")
self.assertEqual(N({}).z, None)
self.assertEqual(N({'x': 20}).x, 20)
self.assertEqual(N({'x': 20, 'y': "30"}).y, "30")
self.assertEqual(N({'y': "30", 'x': 20}).y, "30")
self.assertEqual(N({'z': "hihi"}).z, "hihi")
with self.assertRaises(Exception):
N({'r': 'hi'})
N({'y': 'hi', 'z': "not hihi"})
N({'a': 0, 'b': 0, 'c': 0, 'd': 0})
def test_const_dict_mixed(self):
t = ConstDict(str,int)
self.assertTrue(t({"a":10})["a"] == 10)
t = ConstDict(int, str)
self.assertTrue(t({10:"a"})[10] == "a")
def test_const_dict_comparison(self):
t = ConstDict(str,str)
self.assertEqual(t({'a':'b'}), t({'a':'b'}))
self.assertLess(t({}), t({'a':'b'}))
def test_const_dict_lookup(self):
for type_to_use, vals in [
(int, list(range(20))),
(bytes, [b'1', b'2', b'3', b'4', b'5'])
]:
t = ConstDict(type_to_use, type_to_use)
for _ in range(10):
ks = list(vals)
vs = list(vals)
numpy.random.shuffle(ks)
numpy.random.shuffle(vs)
py_d = {}
for i in range(len(ks)):
py_d[ks[i]] = vs[i]
typed_d = t(py_d)
for k in py_d:
self.assertEqual(py_d[k], typed_d[k])
last_k = None
for k in typed_d:
assert last_k is None or k > last_k, (k,last_k)
last_k = k
def test_const_dict_lookup_time(self):
int_dict = ConstDict(int, int)
d = int_dict({k:k for k in range(1000000)})
for k in range(1000000):
self.assertTrue(k in d)
self.assertTrue(d[k] == k)
def test_const_dict_of_dict(self):
int_dict = ConstDict(int, int)
int_dict_2 = ConstDict(int_dict,int_dict)
d = int_dict({1:2})
d2 = int_dict({1:2,3:4})
big = int_dict_2({d:d2})
self.assertTrue(d in big)
self.assertTrue(d2 not in big)
self.assertTrue(big[d] == d2)
def test_dict_hash_perf(self):
str_dict = ConstDict(str, str)
s = str_dict({'a' * 1000000: 'b' * 1000000})
t0 = time.time()
for k in range(1000000):
hash(s)
elapsed = time.time() - t0
print(elapsed, " to do 1mm")
self.check_expected_performance(elapsed)
def test_const_dict_str_perf(self):
t = ConstDict(str,str)
t0 = time.time()
for i in range(100000):
t({str(k): str(k+1) for k in range(10)})
elapsed = time.time() - t0
print("Took ", elapsed, " to do 1mm")
self.check_expected_performance(elapsed)
def test_const_dict_int_perf(self):
t = ConstDict(int,int)
t0 = time.time()
for i in range(100000):
t({k:k+1 for k in range(10)})
elapsed = time.time() - t0
print("Took ", elapsed, " to do 1mm")
self.check_expected_performance(elapsed)
def test_const_dict_iter_int(self):
t = ConstDict(int,int)
aDict = t({k:k+1 for k in range(100)})
for k in aDict:
self.assertEqual(aDict[k], k+1)
def test_const_dict_iter_str(self):
t = ConstDict(str,str)
aDict = t({str(k):str(k+1) for k in range(100)})
for k in aDict:
self.assertEqual(aDict[str(k)], str(int(k)+1))
def test_alternatives_with_Bytes(self):
alt = Alternative(
"Alt",
x_0={'a':bytes}
)
self.assertEqual(alt.x_0(a=b''), alt.x_0(a=b''))
def test_alternatives_with_str_func(self):
alt = Alternative(
"Alt",
x_0={'a':bytes},
f=lambda self: 1,
__str__=lambda self: "not_your_usual_str"
)
self.assertEqual(alt.x_0().f(), 1)
self.assertEqual(str(alt.x_0()), "not_your_usual_str")
def test_named_tuple_subclass_magic_methods(self):
class X(NamedTuple(x=int,y=int)):
def __str__(self):
return "str override"
def __repr__(self):
return "repr override"
self.assertEqual(repr(X()), "repr override")
self.assertEqual(str(X()), "str override")
def test_empty_alternatives(self):
a = Alternative(
"Alt",
A={},
B={}
)
self.assertEqual(a.A(), a.A())
self.assertIsInstance(deserialize(a, serialize(a, a.A())), a.A)
self.assertEqual(a.A(), deserialize(a, serialize(a, a.A())))
self.assertEqual(a.B(), a.B())
self.assertNotEqual(a.A(), a.B())
self.assertNotEqual(a.B(), a.A())
def test_extracted_alternatives_have_correct_type(self):
Alt = Alternative(
"Alt",
A={},
B={}
)
tOfAlt = TupleOf(Alt)
a = Alt.A()
aTup = tOfAlt((a,))
self.assertEqual(a, aTup[0])
self.assertTrue(type(a) is type(aTup[0]))
def test_alternatives(self):
alt = Alternative(
"Alt",
child_ints={'x': int, 'y': int},
child_strings={'x': str, 'y': str}
)
self.assertTrue(issubclass(alt.child_ints, alt))
self.assertTrue(issubclass(alt.child_strings, alt))
a = alt.child_ints(x=10,y=20)
a2 = alt.child_ints(x=10,y=20)
self.assertEqual(a,a2)
self.assertTrue(isinstance(a, alt))
self.assertTrue(isinstance(a, alt.child_ints))
self.assertEqual(a.x, 10)
self.assertEqual(a.y, 20)
self.assertTrue(a.matches.child_ints)
self.assertFalse(a.matches.child_strings)
with self.assertRaisesRegex(AttributeError, "immutable"):
a.x = 20
def test_alternatives_comparison(self):
empty = Alternative("X", A={}, B={})
self.assertEqual(empty.A(), empty.A())
self.assertEqual(empty.B(), empty.B())
self.assertNotEqual(empty.A(), empty.B())
a = Alternative("X",
A={'a': int},
B={'b': int},
C={'c': str},
D={'d': bytes},
)
self.assertEqual(a.A(a=10), a.A(a=10))
self.assertNotEqual(a.A(a=10), a.A(a=11))
self.assertNotEqual(a.C(c=""), a.C(c="hi"))
self.assertFalse(a.C(c="") == a.C(c="hi"))
self.assertNotEqual(a.D(d=b""), a.D(d=b"hi"))
def test_alternatives_add_operator(self):
alt = Alternative(
"Alt",
child_ints={'x': int, 'y': int},
__add__=lambda l,r: (l,r)
)
a = alt.child_ints(x=0,y=2)
self.assertEqual(a+a,(a,a))
def test_alternatives_perf(self):
alt = Alternative(
"Alt",
child_ints={'x': int, 'y': int},
child_strings={'x': str, 'y': str}
)
t0 = time.time()
for i in range(1000000):
a = alt.child_ints(x=10,y=20)
a.matches.child_ints
a.x
elapsed = time.time() - t0
print("Took ", elapsed, " to do 1mm")
self.check_expected_performance(elapsed, expected=2.0)
def test_object_hashing_and_equality(self):
for _ in range(100):
producer = RandomValueProducer()
producer.addEvenly(20, 2)
values = producer.all()
for v1 in values:
for v2 in values:
if hash(v1) != hash(v2) and v1 == v2:
print(v1,v2, type(v1), type(v2))
for v1 in values:
for v2 in values:
if type(v1) == type(v2) and v1 == v2:
self.assertEqual(hash(v1), hash(v2), (v1, v2))
if type(v1) is type(v2):
self.assertEqual(repr(v1), repr(v2), (v1, v2, type(v1),type(v2)))
values = sorted([makeTuple(v) for v in values])
for i in range(len(values)-1):
self.assertTrue(values[i] <= values[i+1])
self.assertTrue(values[i+1] >= values[i])
def test_bytes_repr(self):
for _ in range(100000):
            # always start with a '"' because otherwise python keeps choosing different
            # initial characters.
            someBytes = b'"' + numpy.random.uniform(size=2).tobytes()  # tostring() is removed in NumPy >= 2.0
self.assertEqual(repr(makeTuple(someBytes)), repr((someBytes,)))
def test_equality_with_native_python_objects(self):
tups = [(1,2,3), (), ("2",), (b"2",), (1,2,3, "b"), (2,), (None,)]
for tup1 in tups:
self.assertEqual( makeTuple(*tup1), tup1 )
for tup2 in tups:
if tup1 != tup2:
self.assertNotEqual( makeTuple(*tup1), tup2 )
for tup1 in tups:
self.assertEqual( makeTupleOf(*tup1), tup1 )
for tup2 in tups:
if tup1 != tup2:
self.assertNotEqual( makeTupleOf(*tup1), tup2 )
def test_add_tuple_of(self):
tupleOfInt = TupleOf(int)
tups = [(),(1,2),(1,),(1,2,3,4)]
for tup1 in tups:
for tup2 in tups:
self.assertEqual(tupleOfInt(tup1) + tupleOfInt(tup2), tupleOfInt(tup1+tup2))
self.assertEqual(tupleOfInt(tup1) + tup2, tupleOfInt(tup1+tup2))
def test_slice_tuple_of(self):
tupleOfInt = TupleOf(int)
ints = tuple(range(20))
        aTuple = tupleOfInt(ints)
for i in range(-21,21):
for i2 in range(-21, 21):
for step in range(-3, 3):
if step != 0:
self.assertEqual(aTuple[i:i2:step], ints[i:i2:step])
try:
ints[i]
self.assertEqual(aTuple[i], ints[i])
except IndexError:
with self.assertRaises(IndexError):
aTuple[i]
def test_dictionary_subtraction_basic(self):
intDict = ConstDict(int,int)
self.assertEqual(intDict({1:2}) - (1,), intDict({}))
self.assertEqual(intDict({1:2, 3:4}) - (1,), intDict({3:4}))
self.assertEqual(intDict({1:2, 3:4}) - (3,), intDict({1:2}))
def test_dictionary_addition_and_subtraction(self):
someDicts = [{i:choice([1,2,3,4,5]) for i in range(choice([4,6,10,20]))} for _ in range(20)]
intDict = ConstDict(int,int)
for d1 in someDicts:
for d2 in someDicts:
addResult = dict(d1)
addResult.update(d2)
self.assertEqual(intDict(d1) + intDict(d2), intDict(addResult))
res = intDict(addResult)
while len(res):
toRemove = []
for i in range(choice(list(range(len(res))))+1):
key = choice(list(addResult))
del addResult[key]
toRemove.append(key)
res = res - toRemove
self.assertEqual(res, intDict(addResult))
def test_subclassing(self):
BaseTuple = NamedTuple(x=int,y=float)
class NTSubclass(BaseTuple):
def f(self):
return self.x + self.y
def __repr__(self):
return "ASDF"
inst = NTSubclass(x=10,y=20)
self.assertTrue(isinstance(inst, BaseTuple))
self.assertTrue(isinstance(inst, NTSubclass))
self.assertTrue(type(inst) is NTSubclass)
self.assertEqual(repr(inst), "ASDF")
self.assertNotEqual(BaseTuple.__repr__(inst), "ASDF")
self.assertEqual(inst.x, 10)
self.assertEqual(inst.f(), 30)
TupleOfSubclass = TupleOf(NTSubclass)
instTup = TupleOfSubclass((inst,BaseTuple(x=20,y=20.0)))
self.assertTrue(isinstance(instTup[0], NTSubclass))
self.assertTrue(isinstance(instTup[1], NTSubclass))
self.assertEqual(instTup[0].f(), 30)
self.assertEqual(instTup[1].f(), 40)
self.assertEqual(BaseTuple(inst).x, 10)
self.assertTrue(OneOf(None, NTSubclass)(None) is None)
self.assertTrue(OneOf(None, NTSubclass)(inst) == inst)
def test_serialization(self):
ints = TupleOf(int)((1,2,3,4))
self.assertEqual(
len(serialize(TupleOf(int), ints)),
40
)
while len(ints) < 1000000:
ints = ints + ints
t0 = time.time()
self.assertEqual(len(serialize(TupleOf(int), ints)), len(ints) * 8 + 8)
print(time.time() - t0, " for ", len(ints))
def test_serialization_roundtrip(self):
badlen = None
for _ in range(100):
producer = RandomValueProducer()
producer.addEvenly(30, 3)
values = producer.all()
for v in values:
ser = serialize(type(v), v)
v2 = deserialize(type(v), ser)
ser2 = serialize(type(v), v2)
self.assertTrue(type(v2) is type(v))
self.assertEqual(ser,ser2)
self.assertEqual(str(v), str(v2))
self.assertEqual(v, v2)
def test_serialize_doesnt_leak(self):
T = TupleOf(int)
def getMem():
return psutil.Process().memory_info().rss / 1024 ** 2
m0 = getMem()
for passIx in range(100):
for i in range(1000):
t = T(list(range(i)))
deserialize(T, serialize(T,t))
self.assertTrue(getMem() < m0 + 100)
def test_const_dict_of_tuple(self):
K = NamedTuple(a=OneOf(float, int), b=OneOf(float, int))
someKs = [K(a=0,b=0), K(a=1), K(a=10), K(b=10), K()]
T = ConstDict(K, K)
indexDict = {}
x = T()
numpy.random.seed(42)
for _ in range(100):
i1 = numpy.random.choice(len(someKs))
i2 = numpy.random.choice(len(someKs))
add = numpy.random.choice([False, True])
if add:
indexDict[i1] = i2
x = x + {someKs[i1]: someKs[i2]}
else:
if i1 in indexDict:
del indexDict[i1]
x = x - (someKs[i1],)
self.assertEqual(x, T({someKs[i]:someKs[v] for i,v in indexDict.items()}))
for k in x:
self.assertTrue(k in x)
x[k]
def test_conversion_of_binary_compatible(self):
class T1(NamedTuple(a=int)):
pass
class T2(NamedTuple(a=int)):
pass
class T1Comp(NamedTuple(d=ConstDict(str, T1))):
pass
class T2Comp(NamedTuple(d=ConstDict(str, T1))):
pass
aT1C = T1Comp(d={'a': T1(a=10)})
self.assertEqual(T2Comp(aT1C).d['a'].a, 10)
self.assertEqual(aT1C, deserialize(T1Comp, serialize(T2Comp, aT1C)))
def test_conversion_of_binary_compatible_nested(self):
def make():
class Interior(NamedTuple(a=int)):
pass
class Exterior(NamedTuple(a=Interior)):
pass
return Exterior
E1 = make()
E2 = make()
OneOf(None, E2)(E1())
def test_python_objects_in_tuples(self):
class NormalPyClass(object):
pass
class NormalPySubclass(NormalPyClass):
pass
NT = NamedTuple(x=NormalPyClass, y=NormalPySubclass)
nt = NT(x=NormalPyClass(),y=NormalPySubclass())
self.assertIsInstance(nt.x, NormalPyClass)
self.assertIsInstance(nt.y, NormalPySubclass)
def test_construct_alternatives_with_positional_arguments(self):
a = Alternative("A", HasOne = {'a': str}, HasTwo = {'a': str, 'b': str})
with self.assertRaises(TypeError):
a.HasTwo("hi")
self.assertEqual(a.HasOne("hi"), a.HasOne(a="hi"))
hasOne = a.HasOne("hi")
self.assertEqual(a.HasOne(hasOne), hasOne)
with self.assertRaises(TypeError):
a.HasOne(a.HasTwo(a='1',b='b'))
def test_recursive_classes_repr(self):
class ASelfRecursiveClass(Class):
x = Member(OneOf(None, lambda: ASelfRecursiveClass))
a = ASelfRecursiveClass()
a.x = a
b = ASelfRecursiveClass()
b.x = b
print(repr(a))
def test_unsafe_pointers_to_list_internals(self):
x = ListOf(int)()
x.resize(100)
for i in range(len(x)):
x[i] = i
aPointer = x.pointerUnsafe(0)
self.assertTrue(str(aPointer).startswith("(Int64*)0x"))
self.assertEqual(aPointer.get(), x[0])
aPointer.set(100)
self.assertEqual(aPointer.get(), 100)
self.assertEqual(x[0], 100)
aPointer = aPointer + 10
self.assertEqual(aPointer.get(), x[10])
self.assertEqual(aPointer[10], x[20])
aPointer.set(20)
self.assertEqual(aPointer.get(), 20)
self.assertEqual(x[10], 20)
#this is OK because ints are POD.
aPointer.initialize(30)
self.assertEqual(x[10], 30)
def test_unsafe_pointers_to_uninitialized_list_items(self):
#because this is testing unsafe operations, the test is
#really just that we don't segfault!
for _ in range(100):
x = ListOf(TupleOf(int))()
x.reserve(10)
for i in range(x.reserved()):
x.pointerUnsafe(i).initialize((i,))
x.setSizeUnsafe(10)
#now check that if we fail to set the size we'll leak the tuple
aLeakedTuple = TupleOf(int)((1,2,3))
x = ListOf(TupleOf(int))()
x.reserve(1)
x.pointerUnsafe(0).initialize(aLeakedTuple)
x = None
self.assertEqual(_types.refcount(aLeakedTuple), 2)
def test_list_copy_operation_duplicates_list(self):
T = ListOf(int)
x = T([1,2,3])
y = T(x)
x[0] = 100
self.assertNotEqual(y[0], 100)
def test_list_and_tuple_conversion_to_numpy(self):
for T in [ListOf(bool), TupleOf(bool)]:
for arr in [
numpy.array([]),
numpy.array([0,1,2,3,4,5]),
numpy.array([0,1,2,3,4,5], 'int32'),
numpy.array([0,1,2,3,4,5], 'int16'),
numpy.array([0,1,2,3,4,5], 'bool')
]:
self.assertEqual(T(arr), T(arr.tolist()))
self.assertEqual(T(arr).toArray().tolist(), [bool(x) for x in arr.tolist()])
for T in [ListOf(int), TupleOf(int)]:
for arr in [
numpy.array([]),
numpy.array([1,2,3,4,5]),
numpy.array([1,2,3,4,5], 'int32'),
numpy.array([1,2,3,4,5], 'int16')
]:
self.assertEqual(T(arr), T(arr.tolist()))
self.assertEqual(T(arr).toArray().tolist(), arr.tolist())
for T in [ListOf(float), TupleOf(float)]:
for arr in [
                numpy.array([]),
############### Functions for initializing satellites ###################
# <NAME> 2019, HUJI
# <NAME> 2020, Yale University
#########################################################################
import numpy as np
import sys
from . import config as cfg
from . import cosmo as co
from . import galhalo as gh
from scipy.interpolate import splev
from scipy.stats import lognorm
#########################################################################
# ---for initial satellite-galaxy stellar size
def Reff(Rv, c2):
"""
Draw half-stellar-mass radius from a Gaussian whose median is
given by the Jiang+19 relation, and the 1-sigma scatter is 0.12dex.
Syntax:
Reff(Rv,c2)
where
Rv: halo radius R_200c [kpc] (float or array)
        c2: concentration, R_200c / r_-2 (float or array of the same size
as Rv)
Note that we do not allow the half-stellar-mass radius to exceed
0.2 R_200c.
Return:
half-stellar-mass radius [kpc] (float or array of the same size
as Rv)
"""
mu = np.log10(gh.Reff(Rv, c2))
return np.minimum(0.2 * Rv, 10.0 ** np.random.normal(mu, 0.12))
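# Illustrative draw (numbers hypothetical): Reff(200., 10.) samples
# 10**Normal(log10(gh.Reff(200., 10.)), 0.12) and caps the result at 0.2*Rv = 40 kpc.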
def Rvir(Mv, Delta=200.0, z=0.0):
"""
Compute halo radius given halo mass, overdensity, and redshift.
Syntax:
Rvir(Mv,Delta=200.,z=0.)
where
Mv: halo mass [M_sun] (float or array)
Delta: spherical overdensity (float, default=200.)
z: redshift (float or array of the same size as Mv, default=0.)
"""
rhoc = co.rhoc(z, h=cfg.h, Om=cfg.Om, OL=cfg.OL)
return (3.0 * Mv / (cfg.FourPi * Delta * rhoc)) ** (1.0 / 3.0)
# ---for initial (sub)halo profiles
# for drawing the conventional halo concentration, c_-2
def concentration(Mv, z=0.0, choice="DM14"):
"""
Draw the conventional halo concentration, c_-2, from an empirical
concentration-mass-redshift relation of choice.
Syntax:
concentration(Mv,z=0.,choice='DM14')
where
Mv: halo mass, can be M_200c or M_vir, depending on the "choice"
[M_sun] (float or array)
z: redshift (float or array of the same size as Mv, default=0.)
choice: the relation of choice (default='DM14', representing
Dutton & Maccio 14)
Note that we do not allow concentration to go below 3.
"""
if choice == "DM14":
mu = gh.lgc2_DM14(Mv, z)
return np.maximum(3.0, 10.0 ** np.random.normal(mu, 0.1))
# for drawing stellar mass from stellar-to-halo-mass relations (SHMR)
def Mstar(Mv, z=0.0, choice="RP17"):
"""
Stellar mass given halo mass and redshift, using abundance-matching
relations.
We assume a 1-sigma scatter of 0.2 in log(stellar mass) at a given
halo mass <<< play with this later !!!
Syntax:
        Mstar(Mv,z=0.,choice='RP17')
where
Mv: halo mass [M_sun] (float or array)
z: instantaneous redshift (float or array of the same size as Mv,
default=0.)
choice: choice of the stellar-to-halo-mass relation
(default='RP17', representing Rodriguez-Puebla+17)
Return:
stellar mass [M_sun] (float or array of the same size as Mv)
"""
if choice == "RP17":
mu = gh.lgMs_RP17(np.log10(Mv), z)
if choice == "B13":
mu = gh.lgMs_B13(np.log10(Mv), z)
return np.minimum(cfg.Ob / cfg.Om * Mv, 10.0 ** np.random.normal(mu, 0.2))
# for drawing the Dekel+ parameters
def aDekel(X, c2, HaloResponse="NIHAO"):
"""
Draw the Dekel+ innermost slope, given the stellar-to-halo-mass
ratio, the conventional halo concentration parameter c_-2, redshift,
and halo-response pattern.
In particular, we use the halo response relation from simulations
to compute the slope, s_0.01, at r = 0.01 R_vir, assuming a 1-sigma
scatter of 0.18, motivated by Tollet+16; then, we express alpha with
s_0.01 and c_-2.
Syntax:
aDekel(X,c2,HaloResponse='NIHAO')
where
X: stellar-to-halo-mass ratio (float or array)
c2: concentration, c_-2 = R_vir / r_-2 (float or array of the
same size as X)
HaloResponse: choice of halo response --
'NIHAO' (default, Tollet+16, mimicking FIRE/NIHAO)
'APOSTLE' (Bose+19, mimicking APOSTLE/Auriga)
Return:
Dekel+ alpha (float or array of the same size as X)
"""
mu = gh.slope(X, HaloResponse)
s = np.maximum(0.0, np.random.normal(mu, 0.18))
r = np.sqrt(c2)
return (s + (2.0 * s - 7.0) * r / 15.0) / (1.0 + (s - 3.5) * r / 15.0)
def aDekel_GivenSlope(s1, c2):
"""
    Compute the Dekel+ innermost slope, given the slope at 1% of the virial
    radius, s_1, and the conventional halo concentration parameter c_-2.
    Unlike aDekel, no scatter is drawn here: the slope s_1 is taken as given
    and converted to the Dekel+ alpha using c_-2.
    Syntax:
        aDekel_GivenSlope(s1,c2)
    where
        s1: logarithmic density slope at 1% r_vir (float or array)
        c2: concentration, c_-2 = R_vir / r_-2 (float or array of the
            same size as s1)
    Return:
        Dekel+ alpha (float or array of the same size as s1)
"""
r = np.sqrt(c2)
return (s1 + (2.0 * s1 - 7.0) * r / 15.0) / (1.0 + (s1 - 3.5) * r / 15.0)
def cDekel(c2, alpha):
"""
Compute the Dekel+ concentration, c, using the conventional
concentration, c_-2, and the Dekel+ innermost slope, alpha.
Syntax:
cDekel(c2,alpha)
where
c2: concentration, c_-2 = R_vir / r_-2 (float or array)
alpha: Dekel+ innermost slope (float or array of the same size
as c2)
Return:
Dekel+ concentration (float or array of the same size as c2)
"""
return (2.0 - alpha) ** 2 / 2.25 * c2
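# Worked example of the conversion above (numbers illustrative): for an NFW-like
# inner slope alpha = 1 and c_-2 = 10, c = (2 - 1)**2 / 2.25 * 10 ~ 4.44.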
def Dekel(Mv, z=0.0, HaloResponse="NIHAO"):
"""
Draw the Dekel+ structural parameters, c and alpha, as well as the
stellar mass, given halo mass, redshift, and halo-response pattern
Internally, it draws the conventional halo concentration, c_-2, which
is used to compute alpha.
Syntax:
Dekel(Mv,z=0.,HaloResponse='NIHAO')
where
Mv: halo mass [M_sun] (float or array)
z: redshift (float or array of the same size as Mv, default=0.)
HaloResponse: choice of halo response --
'NIHAO' (default, Tollet+16, mimicking FIRE/NIHAO)
'APOSTLE' (Bose+19, mimicking APOSTLE/Auriga)
Return:
Dekel+ concentration (float or array of the same size as Mv),
Dekel+ alpha (float or array of the same size as Mv),
stellar mass [M_sun] (float or array of the same size as Mv)
c_-2 (float or array of the same size as Mv)
DMO c_-2 (float or array of the same size as Mv)
"""
c2DMO = concentration(Mv, z)
if z > 6.0: # a safety: in the regime where the stellar-halo mass
# relations are not reliable, manually set the stellar mass
Ms = 1e-5 * Mv
else:
Ms = Mstar(Mv, z)
X = Ms / Mv
mu = gh.c2c2DMO(X, HaloResponse) # mean c_-2 / c_-2,DMO
c2c2DMO = np.maximum(0.0, np.random.normal(mu, 0.1))
c2 = c2DMO * c2c2DMO
c2 = np.maximum(2.0, c2) # safety: c_-2 cannot be unrealistically low
alpha = aDekel(X, c2, HaloResponse)
c = cDekel(c2, alpha)
return c, alpha, Ms, c2, c2DMO
def Dekel_fromMAH(Mv, t, z, HaloResponse="NIHAO"):
"""
Returns the Dekel+ structural parameters, c and alpha, given the
halo mass assembly history (MAH), using the Zhao+09 formula.
Syntax:
        Dekel_fromMAH(Mv,t,z,HaloResponse='NIHAO')
where
Mv: main-branch mass history until the time of interest [M_sun]
(array)
t: the time series of the main-branch mass history (array of the
same length as Mv)
z: the instantaneous redshift (float)
HaloResponse: choice of halo response --
'NIHAO' (default, Tollet+16, mimicking FIRE/NIHAO)
'APOSTLE' (Bose+19, mimicking APOSTLE/Auriga)
Note that we need Mv and t in reverse chronological order, i.e., in
decreasing order, such that Mv[0] and t[0] are the instantaneous halo
mass and instantaneous cosmic time, respectively.
Return:
Dekel+ concentration (float),
Dekel+ alpha (float),
stellar mass [M_sun] (float),
c_-2 (float)
DMO c_-2 (float)
"""
c2DMO = gh.c2_Zhao09(Mv, t)
if z > 6.0: # a safety: in the regime where the stellar-halo mass
# relations are not reliable, manually set the stellar mass
Ms = 1e-5 * Mv[0]
else:
Ms = Mstar(Mv[0], z)
X = Ms / Mv[0]
mu = gh.c2c2DMO(X, HaloResponse) # mean c_-2 / c_-2,DMO
c2c2DMO = np.maximum(0.0, np.random.normal(mu, 0.1))
c2 = c2DMO * c2c2DMO
c2 = np.maximum(2.0, c2)
alpha = aDekel(X, c2, HaloResponse)
c = cDekel(c2, alpha)
return c, alpha, Ms, c2, c2DMO
def Burkert_SIDM1(Mv, Delta=200.0, z=0.0):
"""
Draw the Burkert concentration, c, given halo mass, halo definition,
and redshift, for SIDM halos with cross section per unit mass of
1 cm^2/g, according to the scaling relation of Rocha+13 eq.18 between
the SIDM Burkert scale radius and the CDM NFW scale radius.
Internally, it draws the CDM halo concentration, c_-2, using the
function init.concentration
Syntax:
        Burkert_SIDM1(Mv,Delta=200.,z=0.)
where
Mv: halo mass [M_sun] (float or array)
Delta: spherical overdensity of halos (float or array of
the same size as Mv, default=200.)
z: redshift (float or array of the same size as Mv, default=0.)
Return:
Burkert concentration (float or array of the same size as Mv),
CDM c_-2 (float or array of the same size as Mv)
"""
c2CDM = concentration(Mv, z)
Rv = Rvir(Mv, Delta=Delta, z=z)
c = c2CDM / 0.71 * (c2CDM * Rv / 10.0) ** 0.08
return c, c2CDM
def Burkert_SIDM1_fromMAH(Mv, t, z, Delta=200.0):
"""
Returns the Burkert concentration of a SIDM1 halo, given the
halo mass assembly history (MAH), using the Zhao+09 formula for CDM
halo concentration and the Rocha+13 relation between the Burkert
scale radius of SIDM1 halo and the scale radius of CDM halo.
Syntax:
        Burkert_SIDM1_fromMAH(Mv,t,z,Delta=200.)
where
Mv: main-branch mass history until the time of interest [M_sun]
(array)
t: the time series of the main-branch mass history (array of the
same length as Mv)
z: the instantaneous redshift (float)
Delta: spherical overdensity of halos (float or array of
the same size as Mv, default=200.)
Note that we need Mv and t in reverse chronological order, i.e., in
decreasing order, such that Mv[0] and t[0] are the instantaneous halo
mass and instantaneous cosmic time, respectively.
Return:
Burkert concentration of SIDM1 halo (float),
The concentration c_-2 of the corresponding CDM halo (float)
Instantaneous virial radius [kpc] (float)
"""
c2CDM = gh.c2_Zhao09(Mv, t)
Rv = Rvir(Mv[0], Delta=Delta, z=z)
c = c2CDM / 0.71 * (c2CDM * Rv / 10.0) ** 0.08
return c, c2CDM, Rv
def c2_fromMAH(Mv, t, version="zhao"):
"""
Returns the NFW concentration, c_{-2}, given the halo mass
assembly history (MAH), using the Zhao+09 formula.
Syntax:
c2_fromMAH(Mv,t,version)
where
Mv: main-branch mass history until the time of interest [M_sun]
(array)
t: the time series of the main-branch mass history (array of the
same length as Mv)
version: 'zhao' or 'vdb' for the different versions of the
fitting function parameters (string)
Note that we need Mv and t in reverse chronological order, i.e., in
decreasing order, such that Mv[0] and t[0] are the instantaneous halo
mass and instantaneous cosmic time, respectively.
Return:
c_-2 (float)
"""
return gh.c2_Zhao09(Mv, t, version)
# ---for initializing orbit
def orbit(hp, xc=1.0, eps=0.5):
"""
Initialize the orbit of a satellite, given orbit energy proxy (xc)
and circularity (eps).
Syntax:
        orbit(hp,xc=1.,eps=0.5)
where
hp: host potential (a halo density profile object, as defined
in profiles.py)
xc: the orbital energy parameter, defined such that if the
energy of the orbit is E, x_c(E) is the radius of a circular
orbit in units of the host halo's virial radius (default=1.)
eps: the orbital circularity parameter (default=0.5)
Return:
phase-space coordinates in cylindrical frame
np.array([R,phi,z,VR,Vphi,Vz])
"""
r0 = hp.rh
rc = xc * hp.rh
theta = np.arccos(2.0 * np.random.random() - 1.0) # i.e., isotropy
zeta = 2.0 * np.pi * np.random.random() # i.e., uniform azimuthal
# angle, zeta, of velocity vector in theta-phi-r frame
Vc = hp.Vcirc(
rc,
)
Phic = hp.Phi(
rc,
)
Phi0 = hp.Phi(
r0,
)
V0 = np.sqrt(Vc ** 2 + 2.0 * (Phic - Phi0))
S = eps * rc / r0 * Vc / V0
gamma = np.pi - np.arcsin(S) # angle between r and v vectors. Note that
# we use pi - np.arcsin(S) instead of just np.arcsin(S), because
# the velocity needs to point inward the virial sphere.
if S > 1.0: # a safety, may not be useful
sys.exit("Invalid orbit! sin(gamma)=%.4f,xc=%4.2f,eps=%4.2f" % (S, xc, eps))
sintheta = np.sin(theta)
costheta = np.cos(theta)
singamma = np.sin(gamma)
cosgamma = np.cos(gamma)
sinzeta = np.sin(zeta)
coszeta = np.cos(zeta)
return np.array(
[
r0 * sintheta,
np.random.random() * 2.0 * np.pi, # uniformly random phi in (0,2pi)
r0 * costheta,
V0 * (singamma * coszeta * costheta + cosgamma * sintheta),
V0 * singamma * sinzeta,
V0 * (cosgamma * costheta - singamma * coszeta * sintheta),
]
)
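# Usage sketch (illustrative; the profiles.NFW signature is an assumption about
# the profiles.py API):
# hp = profiles.NFW(1e12, 10.)     # host potential with M_vir = 1e12 Msun, c_-2 = 10
# xv = orbit(hp, xc=1.0, eps=0.5)  # -> np.array([R, phi, z, VR, Vphi, Vz])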
def orbit_from_Jiang2015(hp, sp, z, sample_unbound=True):
"""
Initialize the orbit of a satellite by sampling from V/V_{200c}
and Vr/V distributions from Jiang+2015. Subhaloes are placed
on initial orbital radii of r_{200c} of the host. This is an
extension of the Jiang+15 model, as we use the host peak height,
rather than host mass at z=0, in order to determine which
distribution to sample from.
Syntax:
orbit_from_Jiang2015(hp, sp, z, sample_unbound)
where
hp: host *NFW* potential (a halo density profile object,
as defined in profiles.py)
sp: subhalo *NFW* potential (a halo density profile object,
as defined in profiles.py)
z: the redshift of accretion (float)
sample_unbound: set to true to allow orbits to potentially be
unbound at infall (boolean)
Return:
phase-space coordinates in cylindrical frame
np.array([R,phi,z,VR,Vphi,Vz])
Note:
This assumes NFW profiles profile, since we're using the
.otherMassDefinition() method that has only been implemented
for NFW.
"""
Mh200c, rh200c, ch200c = hp.otherMassDefinition(200.0)
Ms200c, rs200c, cs200c = sp.otherMassDefinition(200.0)
nu = co.nu(Mh200c, z, **cfg.cosmo)
mass_ratio = Ms200c / Mh200c
iM = np.searchsorted(cfg.jiang_nu_boundaries, nu)
imM = np.searchsorted(cfg.jiang_ratio_boundaries, mass_ratio)
rand_VV200c = np.random.uniform()
rand_VrV = np.random.uniform()
    if not sample_unbound:
vbyvv200c_max = np.sqrt(2.0 * np.abs(hp.Phi(rh200c))) / hp.Vcirc(rh200c)
while True:
rand_VV200c = np.random.uniform()
V_by_V200c = splev(rand_VV200c, cfg.V_by_V200c_interps[iM][imM])
if V_by_V200c < vbyvv200c_max: # sample until we get a bound orbit
break
else:
V_by_V200c = splev(rand_VV200c, cfg.V_by_V200c_interps[iM][imM])
Vr_by_V = splev(rand_VrV, cfg.Vr_by_V_interps[iM][imM])
gamma = np.pi - np.arccos(Vr_by_V)
V0 = V_by_V200c * hp.Vcirc(rh200c)
theta = np.arccos(2.0 * np.random.random() - 1.0) # i.e., isotropy
zeta = 2.0 * np.pi * np.random.random() # i.e., uniform azimuthal
# angle, zeta, of velocity vector in theta-phi-r frame
sintheta = np.sin(theta)
costheta = np.cos(theta)
singamma = np.sin(gamma)
cosgamma = np.cos(gamma)
sinzeta = np.sin(zeta)
coszeta = np.cos(zeta)
return np.array(
[
rh200c * sintheta,
np.random.random() * 2.0 * np.pi, # uniformly random phi in (0,2pi)
rh200c * costheta,
V0 * (singamma * coszeta * costheta + cosgamma * sintheta),
V0 * singamma * sinzeta,
V0 * (cosgamma * costheta - singamma * coszeta * sintheta),
]
)
def orbit_from_Li2020(hp, vel_ratio, gamma):
"""
Initialize the orbit of a satellite, given total velocity V/Vvir
and infall angle.
Syntax:
orbit(hp, vel_ratio, gamma)
where
hp: host potential (a halo density profile object, as defined
in profiles.py)
vel_ratio: the total velocity at infall in units of Vvir
gamma: the angle between velocity and position vectors of subhalo
Return:
phase-space coordinates in cylindrical frame
np.array([R,phi,z,VR,Vphi,Vz])
Note:
This assumes that the BN98 virial mass definition is used
for the haloes, since the host rh quantity is used as the radius
where the circular velocity is computed.
"""
r0 = hp.rh
V0 = vel_ratio * hp.Vcirc(r0)
theta = np.arccos(2.0 * np.random.random() - 1.0) # i.e., isotropy
zeta = 2.0 * np.pi * np.random.random() # i.e., uniform azimuthal
# angle, zeta, of velocity vector in theta-phi-r frame
sintheta = np.sin(theta)
costheta = np.cos(theta)
    singamma = np.sin(gamma)
# -*- coding: utf-8 -*-
from __future__ import print_function
from itertools import combinations
import numpy as np
from scipy import stats
import pandas as pd
from lifelines.utils import group_survival_table_from_events
from lifelines._statistics import concordance_index as _cindex
def concordance_index(event_times, predicted_event_times, event_observed=None):
"""
Calculates the concordance index (C-index) between two series
of event times. The first is the real survival times from
the experimental data, and the other is the predicted survival
times from a model of some kind.
The concordance index is a value between 0 and 1 where,
0.5 is the expected result from random predictions,
1.0 is perfect concordance and,
0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)
See:
<NAME>, <NAME>, <NAME>. Multivariable prognostic models: issues in
developing models, evaluating assumptions and adequacy, and measuring and
reducing errors. Statistics in Medicine 1996;15(4):361-87.
Parameters:
event_times: a (n,) array of observed survival times.
predicted_event_times: a (n,) array of predicted survival times.
event_observed: a (n,) array of censorship flags, 1 if observed,
0 if not. Default assumes all observed.
Returns:
c-index: a value between 0 and 1.
"""
event_times = np.array(event_times, dtype=float)
predicted_event_times = np.array(predicted_event_times, dtype=float)
if event_observed is None:
event_observed = np.ones(event_times.shape[0], dtype=float)
if event_times.shape != predicted_event_times.shape:
raise ValueError("Event times and predictions must have the same shape")
if event_times.ndim != 1:
raise ValueError("Event times can only be 1-dimensional: (n,)")
# 100 times faster to calculate in Fortran
return _cindex(event_times,
predicted_event_times,
event_observed)
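# Small self-check of the function above (values chosen by hand): perfectly ordered
# predictions give 1.0, reversed predictions give 0.0.
# concordance_index([1., 2., 3., 4.], [1., 2., 3., 4.])  # -> 1.0
# concordance_index([1., 2., 3., 4.], [4., 3., 2., 1.])  # -> 0.0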
def logrank_test(event_times_A, event_times_B, event_observed_A=None, event_observed_B=None,
alpha=0.95, t_0=-1, suppress_print=False, **kwargs):
"""
Measures and reports on whether two intensity processes are different. That is, given two
event series, determines whether the data generating processes are statistically different.
The test-statistic is chi-squared under the null hypothesis.
H_0: both event series are from the same generating processes
H_A: the event series are from different generating processes.
Pre lifelines 0.2.x: this returned a test statistic.
Post lifelines 0.2.x: this returns the results of the entire test.
See Survival and Event Analysis, page 108. This implicitly uses the log-rank weights.
Parameters:
event_times_foo: a (nx1) array of event durations (birth to death,...) for the population.
censorship_bar: a (nx1) array of censorship flags, 1 if observed, 0 if not. Default assumes all observed.
t_0: the period under observation, -1 for all time.
      alpha: the level of significance
suppress_print: if True, do not print the summary. Default False.
kwargs: add keywords and meta-data to the experiment summary
Returns
summary: a print-friendly string detailing the results of the test.
p: the p-value
      test result: True if reject the null, (pedantically None if inconclusive)
"""
event_times_A, event_times_B = np.array(event_times_A), np.array(event_times_B)
if event_observed_A is None:
event_observed_A = np.ones(event_times_A.shape[0])
if event_observed_B is None:
event_observed_B = np.ones(event_times_B.shape[0])
event_times = np.r_[event_times_A, event_times_B]
groups = np.r_[np.zeros(event_times_A.shape[0]), np.ones(event_times_B.shape[0])]
event_observed = np.r_[event_observed_A, event_observed_B]
return multivariate_logrank_test(event_times, groups, event_observed,
alpha=alpha, t_0=t_0, suppress_print=suppress_print, **kwargs)
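# Illustrative call (hypothetical data arrays):
# summary, p, result = logrank_test(durations_A, durations_B,
#                                   event_observed_A=obs_A, event_observed_B=obs_B)
# `result` is True when the null of a common generating process is rejected,
# and None when the test is inconclusive.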
def pairwise_logrank_test(event_durations, groups, event_observed=None,
alpha=0.95, t_0=-1, bonferroni=True, suppress_print=False, **kwargs):
"""
Perform the logrank test pairwise for all n>2 unique groups (use the more appropriate logrank_test for n=2).
We have to be careful here: if there are n groups, then there are n*(n-1)/2 pairs -- so many pairs increase
the chance that here will exist a significantly different pair purely by chance. For this reason, we use the
Bonferroni correction (rewight the alpha value higher to accomidate the multiple tests).
Parameters:
event_durations: a (n,) numpy array the (partial) lifetimes of all individuals
groups: a (n,) numpy array of unique group labels for each individual.
event_observed: a (n,) numpy array of event_observed events: 1 if observed death, 0 if censored. Defaults
to all observed.
      alpha: the level of significance desired.
      t_0: the final time to compare the series up to. Defaults to all.
      bonferroni: If true, uses the Bonferroni correction to compare the M=n(n-1)/2 pairs,
        i.e. the significance level (1 - alpha) is divided by M.
        See [here](http://en.wikipedia.org/wiki/Bonferroni_correction).
suppress_print: if True, do not print the summary. Default False.
kwargs: add keywords and meta-data to the experiment summary.
Returns:
S: a (n,n) dataframe of print-friendly test summaries (np.nan on the diagonal). Ex:
P: a (n,n) dataframe of p-values (np.nan on the diagonal).
T: a (n,n) dataframe of test results (True is significant, None if not) (np.nan on the diagonal).
Example:
P:
a b c
a NaN 0.711136 0.401462
b 0.711136 NaN 0.734605
c 0.401462 0.734605 NaN
T:
a b c
a NaN None None
b None NaN None
c None None NaN
"""
if event_observed is None:
event_observed = np.ones((event_durations.shape[0], 1))
n = max(event_durations.shape)
assert n == max(event_durations.shape) == max(event_observed.shape), "inputs must be of the same length."
groups, event_durations, event_observed = map(lambda x: pd.Series(np.reshape(x, (n,))), [groups, event_durations, event_observed])
unique_groups = np.unique(groups)
n = unique_groups.shape[0]
if bonferroni:
m = 0.5 * n * (n - 1)
alpha = 1 - (1 - alpha) / m
P = np.zeros((n, n), dtype=float)
    T = np.empty((n, n), dtype=object)
import tensorflow_advanced_segmentation_models as tasm
import os
import cv2
import numpy as np
from time import time
import tensorflow as tf
import albumentations as A
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from labelme.utils import lblsave as label_save
from common.utils import visualize_segmentation
from PIL import Image
DATA_DIR = "/home/minouei/Downloads/datasets/contract/version2"
x_train_dir = os.path.join(DATA_DIR, 'images/train')
y_train_dir = os.path.join(DATA_DIR, 'annotations/train')
x_valid_dir = os.path.join(DATA_DIR, 'images/val')
y_valid_dir = os.path.join(DATA_DIR, 'annotations/val')
x_test_dir = os.path.join(DATA_DIR, 'images/val')
y_test_dir = os.path.join(DATA_DIR, 'annotations/val')
"""### Helper Functions"""
# helper function for data visualization
def visualize(**images):
"""PLot images in one row."""
n = len(images)
plt.figure(figsize=(16, 5))
ns=''
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(' '.join(name.split('_')).title())
plt.imshow(image)
ns=f'{name}_{i}'
# plt.show()
plt.savefig(ns+'.png', bbox_inches='tight')
# helper function for data visualization
def denormalize(x):
"""Scale image to range 0..1 for correct plot"""
x_max = np.percentile(x, 98)
x_min = np.percentile(x, 2)
x = (x - x_min) / (x_max - x_min)
x = x.clip(0, 1)
return x
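# Illustrative sketch: denormalize() stretches the 2nd-98th percentile range of
# an array onto [0, 1] and clips the tails, so a few extreme pixels cannot wash
# out the plot. The random image below is hypothetical.
def _example_denormalize():
    img = np.random.randn(64, 64, 3) * 50 + 128
    scaled = denormalize(img)
    assert scaled.min() >= 0.0 and scaled.max() <= 1.0
    return scaled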
def round_clip_0_1(x, **kwargs):
return x.round().clip(0, 1)
"""## Data Augmentation Functions"""
# define heavy augmentations
def get_training_augmentation(height, width):
train_transform = [
A.PadIfNeeded(min_height=height, min_width=width, always_apply=True, border_mode=0),
A.Resize(height, width, always_apply=True)
]
return A.Compose(train_transform)
def get_validation_augmentation(height, width):
"""Add paddings to make image shape divisible by 32"""
test_transform = [
A.PadIfNeeded(height, width),
A.Resize(height, width, always_apply=True)
]
return A.Compose(test_transform)
def data_get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
        preprocessing_fn (callable): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
A.Lambda(image=preprocessing_fn),
]
return A.Compose(_transform)
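# Illustrative sketch of how these transforms are typically composed: pad/resize
# first, then apply the backbone-specific normalization. The lambda below is a
# simple stand-in normalizer, not the real preprocessing function of any
# particular backbone; the image and mask are hypothetical.
def _example_compose_transforms():
    image = np.random.randint(0, 255, size=(600, 500, 3), dtype=np.uint8)
    mask = np.random.randint(0, 2, size=(600, 500), dtype=np.uint8)
    train_aug = get_training_augmentation(704, 704)
    preprocess = data_get_preprocessing(lambda x, **kwargs: x / 255.0)
    sample = train_aug(image=image, mask=mask)
    image, mask = sample['image'], sample['mask']
    image = preprocess(image=image)['image']
    return image, mask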
"""## Define some global variables"""
TOTAL_CLASSES = ['background', 'headerlogo', 'twocoltabel', 'recieveraddress', 'text', 'senderaddress', 'ortdatum',
'companyinfo', 'fulltabletyp1', 'fulltabletyp2', 'copylogo', 'footerlogo', 'footertext', 'signatureimage', 'fulltabletyp3', 'unlabelled']
MODEL_CLASSES = TOTAL_CLASSES
ALL_CLASSES = False
if MODEL_CLASSES == TOTAL_CLASSES:
MODEL_CLASSES = MODEL_CLASSES[:-1]
ALL_CLASSES = True
BATCH_SIZE = 1
N_CLASSES = 16
HEIGHT = 704
WIDTH = 704
BACKBONE_NAME = "efficientnetb3"
WEIGHTS = "imagenet"
WWO_AUG = False # train data with and without augmentation
"""### Functions to calculate appropriate class weights"""
################################################################################
# Class Weights
################################################################################
def get_dataset_counts(d):
    pixel_count = np.array(list(d.values()))
    sum_pixel_count = pixel_count.sum()
    return pixel_count, sum_pixel_count
def get_dataset_statistics(pixel_count, sum_pixel_count):
    pixel_frequency = np.round(np.asarray(pixel_count) / sum_pixel_count, 4)
mean_pixel_frequency = np.round(np.mean(pixel_frequency), 4)
return pixel_frequency, mean_pixel_frequency
def get_balancing_class_weights(classes, d):
pixel_count, sum_pixel_count = get_dataset_counts(d)
background_pixel_count = 0
mod_pixel_count = []
for c in TOTAL_CLASSES:
if c not in classes:
background_pixel_count += d[c]
else:
mod_pixel_count.append(d[c])
mod_pixel_count.append(background_pixel_count)
pixel_frequency, mean_pixel_frequency = get_dataset_statistics(mod_pixel_count, sum_pixel_count)
class_weights = np.round(mean_pixel_frequency / pixel_frequency, 2)
return class_weights
# class_weights = get_balancing_class_weights(MODEL_CLASSES, CLASSES_PIXEL_COUNT_DICT)
# print(class_weights)
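# Illustrative sketch with a hypothetical pixel-count dictionary: classes that
# occupy fewer pixels receive proportionally larger weights, which counteracts
# class imbalance in the loss.
def _example_class_weights():
    pixel_counts = {c: 1000 for c in TOTAL_CLASSES}
    pixel_counts['background'] = 50000  # the dominant class gets a small weight
    return get_balancing_class_weights(MODEL_CLASSES, pixel_counts)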
def save_seg_result(image, pred_mask, gt_mask, image_id, class_names):
# save predict mask as PNG image
mask_dir = os.path.join('result','predict_mask')
os.makedirs(mask_dir, exist_ok=True)
label_save(os.path.join(mask_dir, str(image_id)+'.png'), pred_mask)
# visualize segmentation result
title_str = 'Predict Segmentation\nmIOU: '
gt_title_str = 'GT Segmentation'
image_array = visualize_segmentation(image, pred_mask, gt_mask, class_names=class_names, title=title_str, gt_title=gt_title_str, ignore_count_threshold=1)
# save result as JPG
result_dir = os.path.join('result','segmentation')
os.makedirs(result_dir, exist_ok=True)
result_file = os.path.join(result_dir, str(image_id)+'.jpg')
Image.fromarray(image_array).save(result_file)
"""## Data Generation Functions"""
################################################################################
# Data Generator
################################################################################
def create_image_label_path_generator(images_dir, masks_dir, shuffle=False, seed=None):
ids = sorted(os.listdir(images_dir))
mask_ids = sorted(os.listdir(masks_dir))
    if shuffle:
if seed is not None:
tf.random.set_seed(seed)
indices = tf.range(start=0, limit=tf.shape(ids)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)
ids = tf.gather(ids, shuffled_indices).numpy().astype(str)
mask_ids = tf.gather(mask_ids, shuffled_indices).numpy().astype(str)
images_fps = [os.path.join(images_dir, image_id) for image_id in ids]
masks_fps = [os.path.join(masks_dir, image_id) for image_id in mask_ids]
while True:
for i in range(len(images_fps)):
yield [images_fps[i], masks_fps[i]]
def process_image_label(images_paths, masks_paths, classes, augmentation=None, preprocessing=None):
class_values = [TOTAL_CLASSES.index(cls.lower()) for cls in classes]
# read data
image = cv2.imread(images_paths)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(masks_paths, 0)
# extract certain classes from mask (e.g. cars)
masks = [(mask == v) for v in class_values]
mask = np.stack(masks, axis=-1).astype('float')
# add background if mask is not binary
if mask.shape[-1] != 1:
background = 1 - mask.sum(axis=-1, keepdims=True)
mask = np.concatenate((mask, background), axis=-1)
# apply augmentations
if augmentation:
sample = augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if preprocessing:
sample = preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# mask = np.squeeze(np.argmax(mask, axis=-1))
# mask = np.argmax(mask, axis=-1)
# mask = mask[..., np.newaxis]
return image, mask
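# Worked sketch of the one-hot mask construction used above: for class values
# [1, 2] and a 2x2 label mask, np.stack produces one boolean plane per class,
# and the residual "background" plane makes the channels sum to 1 per pixel.
def _example_one_hot_mask():
    label = np.array([[1, 2], [0, 1]])
    planes = [(label == v) for v in [1, 2]]
    one_hot = np.stack(planes, axis=-1).astype('float')
    background = 1 - one_hot.sum(axis=-1, keepdims=True)
    return np.concatenate((one_hot, background), axis=-1)  # shape (2, 2, 3)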
def DataGenerator(train_dir, label_dir, batch_size, height, width, classes, augmentation, wwo_aug=False, shuffle=False, seed=None):
image_label_path_generator = create_image_label_path_generator(
train_dir, label_dir, shuffle=shuffle, seed=seed
)
if wwo_aug:
while True:
images = np.zeros(shape=[batch_size, height, width, 3])
labels = np.zeros(shape=[batch_size, height, width, len(classes) + 1], dtype=np.float32)
for i in range(0, batch_size, 2):
image_path, label_path = next(image_label_path_generator)
image_aug, label_aug = process_image_label(image_path, label_path, classes=classes, augmentation=augmentation)
image_wo_aug, label_wo_aug = process_image_label(image_path, label_path, classes=classes, augmentation=get_validation_augmentation(height=HEIGHT, width=WIDTH))
images[i], labels[i] = image_aug, label_aug
images[i + 1], labels[i + 1] = image_wo_aug, label_wo_aug
yield tf.convert_to_tensor(images), tf.convert_to_tensor(labels, tf.float32)
else:
while True:
images = np.zeros(shape=[batch_size, height, width, 3])
labels = np.zeros(shape=[batch_size, height, width, len(classes) + 1], dtype=np.float32)
for i in range(batch_size):
image_path, label_path = next(image_label_path_generator)
image, label = process_image_label(image_path, label_path, classes=classes, augmentation=augmentation)
images[i], labels[i] = image, label
yield tf.convert_to_tensor(images), tf.convert_to_tensor(labels, tf.float32)
"""## Create the Model"""
base_model, layers, layer_names = tasm.create_base_model(name=BACKBONE_NAME, weights=WEIGHTS, height=HEIGHT, width=WIDTH, include_top=False, pooling=None)
BACKBONE_TRAINABLE = True
model = tasm.DANet(n_classes=N_CLASSES, base_model=base_model, output_layers=layers, backbone_trainable=BACKBONE_TRAINABLE)
model.load_weights("DANet.ckpt")
def display(display_list):
plt.figure(figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i+1)
plt.title(title[i])
plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
plt.axis('off')
plt.show()
def create_mask(pred_mask):
pred_mask = tf.argmax(pred_mask, axis=-1)
pred_mask = pred_mask[..., tf.newaxis]
return pred_mask[0]
def show_predictions(dataset=None, num=1):
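    # Note: this relies on module-level `sample_image`, `sample_mask` and
    # `categorical_focal_dice_loss` being defined before it is called; the
    # example call below is commented out.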
output_model = model(sample_image[tf.newaxis, ...])
# print(output_model.numpy())
output_mask = create_mask(output_model)
# print(sample_mask.shape)
    cce = tf.keras.losses.CategoricalCrossentropy()
    print("CategoricalCrossentropy: " + str(cce(sample_mask, output_model[0]).numpy()))
print("Iou-Score: " + str(tasm.losses.iou_score(sample_mask, output_model[0]).numpy()))
print("categorical Focal Dice Loss: " + str(categorical_focal_dice_loss(sample_mask, output_model[0]).numpy()))
display([sample_image, sample_mask, K.one_hot(K.squeeze(output_mask, axis=-1), 3)])
# show_predictions()
val_shuffle = True
seed = 29598
ValidationSet = DataGenerator(
x_valid_dir,
y_valid_dir,
1,
HEIGHT,
WIDTH,
classes=MODEL_CLASSES,
augmentation=get_validation_augmentation(height=HEIGHT, width=WIDTH),
shuffle=val_shuffle,
seed=seed
)
"""# Evaluation on Test Data"""
# scores = model.evaluate(TestSet, steps=101)
# print("Loss: {:.5}".format(scores[0]))
# for metric, value in zip(metrics, scores[1:]):
# if metric != "accuracy":
# metric = metric.__name__
# print("mean {}: {:.5}".format(metric, value))
# """## Visual Examples on Test Data"""
n = 5
ids = np.random.choice(np.arange(101), size=n, replace=False)
# print(ids)
counter = 0
second_counter = 0
for i in ValidationSet:
if counter in ids:
image, gt_mask = i
# image = np.expand_dims(image, axis=0)
pr_mask = model.predict(image)
pr_mask = np.argmax(pr_mask, axis=-1)
        gt_mask = np.argmax(gt_mask, axis=-1)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.engine import base_layer_utils
from keras.layers.rnn import gru
from keras.layers.rnn import gru_v1
from keras.layers.rnn import lstm
from keras.layers.rnn import lstm_v1
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import generic_utils
# isort: off
from tensorflow.python.training.tracking import (
util as trackable_util,
)
# Used for nested input/output/state RNN test.
NestedInput = collections.namedtuple("NestedInput", ["t1", "t2"])
NestedState = collections.namedtuple("NestedState", ["s1", "s2"])
@test_combinations.run_all_keras_modes
class RNNTest(test_combinations.TestCase):
def test_minimal_rnn_cell_non_layer(self):
class MinimalRNNCell:
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units))
)
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32),
]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_non_layer_multiple_states(self):
class MinimalRNNCell:
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units))
)
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16),
]
layer = keras.layers.RNN(cells)
self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
self.assertEqual(layer.cell.output_size, 32)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_layer(self):
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super().__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, [output]
def get_config(self):
config = {"units": self.units}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(
{"MinimalRNNCell": MinimalRNNCell}
):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(
{"MinimalRNNCell": MinimalRNNCell}
):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_minimal_rnn_cell_abstract_rnn_cell(self):
class MinimalRNNCell(keras.layers.AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super().__init__(**kwargs)
@property
def state_size(self):
return self.units
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, output
@property
def output_size(self):
return self.units
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8), MinimalRNNCell(16), MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_with_time_major(self):
batch = 10
time_step = 5
embedding_dim = 4
units = 3
# Test basic case.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(x)
layer = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)
self.assertEqual(
layer.compute_output_shape(
(time_step, None, embedding_dim)
).as_list(),
[time_step, None, units],
)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, units))
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
# Test stacking.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(x)
cell_units = [10, 8, 6]
cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, cell_units[-1])),
)
# Test masking.
x = keras.Input((time_step, embedding_dim))
time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
x
)
mask = keras.layers.Masking()(time_major)
rnn = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)(mask)
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(rnn)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
# Test layer output
x = keras.Input((time_step, embedding_dim))
rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True)
y = rnn_1(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
x_np = np.random.random((batch, time_step, embedding_dim))
y_np_1 = model.predict(x_np)
time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
x
)
rnn_2 = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)
y_2 = rnn_2(time_major)
y_2 = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y_2)
model_2 = keras.models.Model(x, y_2)
rnn_2.set_weights(rnn_1.get_weights())
y_np_2 = model_2.predict(x_np)
self.assertAllClose(y_np_1, y_np_2, atol=1e-4)
def test_rnn_cell_with_constants_layer(self):
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {"RNNCellWithConstants": RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# test flat list inputs.
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer([x, c])
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
# Test stacking.
cells = [
gru.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3),
]
layer = keras.layers.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
)
# Test GRUCell reset_after property.
x = keras.Input((None, 5))
c = keras.Input((3,))
cells = [gru.GRUCell(32, reset_after=True)]
layer = keras.layers.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
)
# Test stacked RNN serialization
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_rnn_cell_with_non_keras_constants(self):
# Test basic case.
x = keras.Input((None, 5))
c = tf.zeros([6, 3], dtype=tf.float32)
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
gru.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3),
]
layer = keras.layers.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_cell_with_constants_layer_passing_initial_state(self):
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
s = keras.Input((32,))
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32)),
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {"RNNCellWithConstants": RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_np + 10.0, c_np])
with self.assertRaises(AssertionError):
self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer([x, s, c])
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_rnn_cell_with_non_keras_constants_and_initial_state(self):
# Test basic case.
x = keras.Input((None, 5))
c = tf.zeros([6, 3], dtype=tf.float32)
s = tf.zeros([6, 32], dtype=tf.float32)
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
gru.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3),
]
layer = keras.layers.RNN(cells)
s = [
tf.zeros([6, 8], dtype=tf.float32),
tf.zeros([6, 12], dtype=tf.float32),
tf.zeros([6, 32], dtype=tf.float32),
]
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_stacked_rnn_attributes(self):
if tf.executing_eagerly():
self.skipTest("reduce_sum is not available in eager mode.")
cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)]
layer = keras.layers.RNN(cells)
layer.build((None, None, 1))
# Test weights
self.assertEqual(len(layer.trainable_weights), 6)
cells[0].trainable = False
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
# Test `get_losses_for` and `losses`
x = keras.Input((None, 1))
loss_1 = tf.reduce_sum(x)
loss_2 = tf.reduce_sum(cells[0].kernel)
cells[0].add_loss(loss_1, inputs=x)
cells[0].add_loss(loss_2)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(layer.get_losses_for(None), [loss_2])
self.assertEqual(layer.get_losses_for(x), [loss_1])
# Test `updates`
cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)]
layer = keras.layers.RNN(cells)
x = keras.Input((None, 1))
_ = layer(x)
update_1 = tf.compat.v1.assign_add(
cells[0].kernel, x[0, 0, 0] * cells[0].kernel
)
update_2 = tf.compat.v1.assign_add(
cells[0].kernel, tf.ones_like(cells[0].kernel)
)
# TODO(b/128682878): Remove when RNNCells are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
cells[0].add_update(update_1)
cells[0].add_update(update_2)
self.assertEqual(len(layer.updates), 2)
def test_rnn_dynamic_trainability(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
layer = layer_class(units)
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
@parameterized.parameters(
[keras.layers.SimpleRNN, keras.layers.GRU, keras.layers.LSTM]
)
def test_rnn_cell_trainability(self, layer_cls):
# https://github.com/tensorflow/tensorflow/issues/32369.
layer = layer_cls(3, trainable=False)
self.assertFalse(layer.cell.trainable)
layer.trainable = True
self.assertTrue(layer.cell.trainable)
def test_state_reuse_with_dropout(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
timesteps = 2
num_samples = 2
input1 = keras.Input(
batch_shape=(num_samples, timesteps, embedding_dim)
)
layer = layer_class(
units, return_state=True, return_sequences=True, dropout=0.2
)
state = layer(input1)[1:]
input2 = keras.Input(
batch_shape=(num_samples, timesteps, embedding_dim)
)
output = layer_class(units)(input2, initial_state=state)
model = keras.Model([input1, input2], output)
inputs = [
np.random.random((num_samples, timesteps, embedding_dim)),
np.random.random((num_samples, timesteps, embedding_dim)),
]
model.predict(inputs)
def test_builtin_and_custom_rnn_cell_serialization(self):
@keras.utils.generic_utils.register_keras_serializable(
package="TestOnly"
)
class CustomRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super().__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, [output]
def get_config(self):
config = {"units": self.units}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
for cell_class in [
keras.layers.SimpleRNNCell,
keras.layers.GRUCell,
keras.layers.LSTMCell,
CustomRNNCell,
]:
# Test basic case.
x = keras.Input((None, 5))
cell = cell_class(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [cell_class(8), cell_class(12), cell_class(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer=[
keras.layers.SimpleRNN,
gru_v1.GRU,
lstm_v1.LSTM,
gru.GRU,
lstm.LSTM,
],
unroll=[True, False],
)
)
def test_rnn_dropout(self, layer, unroll):
rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll)
if not unroll:
x = keras.Input((None, 5))
else:
x = keras.Input((5, 5))
y = rnn_layer(x)
model = keras.models.Model(x, y)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
cell=[
keras.layers.SimpleRNNCell,
keras.layers.GRUCell,
keras.layers.LSTMCell,
],
unroll=[True, False],
)
)
def test_stacked_rnn_dropout(self, cell, unroll):
cells = [
cell(3, dropout=0.1, recurrent_dropout=0.1),
cell(3, dropout=0.1, recurrent_dropout=0.1),
]
layer = keras.layers.RNN(cells, unroll=unroll)
if not unroll:
x = keras.Input((None, 5))
else:
x = keras.Input((5, 5))
y = layer(x)
model = keras.models.Model(x, y)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
def test_dropout_mask_reuse(self):
# The layer is created with recurrent_initializer = zero, so that the
# the recurrent state won't affect the output. By doing this, we can
# verify the output and see if the same mask is applied to for each
# timestep.
layer_1 = keras.layers.SimpleRNN(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
return_sequences=True,
unroll=True,
)
layer_2 = keras.layers.RNN(
keras.layers.SimpleRNNCell(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
),
return_sequences=True,
unroll=True,
)
layer_3 = keras.layers.RNN(
[
keras.layers.SimpleRNNCell(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
),
keras.layers.SimpleRNNCell(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
),
],
return_sequences=True,
unroll=True,
)
def verify(rnn_layer):
inputs = tf.constant(1.0, shape=(6, 2, 5))
out = rnn_layer(inputs, training=True)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
batch_1 = self.evaluate(out)
batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
self.assertAllClose(batch_1_t0, batch_1_t1)
# This simulate the layer called with multiple batches in eager mode
if tf.executing_eagerly():
out2 = rnn_layer(inputs, training=True)
else:
out2 = out
batch_2 = self.evaluate(out2)
batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
self.assertAllClose(batch_2_t0, batch_2_t1)
# Also validate that different dropout is used by between batches.
self.assertNotAllClose(batch_1_t0, batch_2_t0)
self.assertNotAllClose(batch_1_t1, batch_2_t1)
for l in [layer_1, layer_2, layer_3]:
verify(l)
def test_stacked_rnn_compute_output_shape(self):
cells = [keras.layers.LSTMCell(3), keras.layers.LSTMCell(6)]
embedding_dim = 4
timesteps = 2
layer = keras.layers.RNN(
cells, return_state=True, return_sequences=True
)
output_shape = layer.compute_output_shape(
(None, timesteps, embedding_dim)
)
expected_output_shape = [
(None, timesteps, 6),
(None, 3),
(None, 3),
(None, 6),
(None, 6),
]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape], expected_output_shape
)
# Test reverse_state_order = True for stacked cell.
stacked_cell = keras.layers.StackedRNNCells(
cells, reverse_state_order=True
)
layer = keras.layers.RNN(
stacked_cell, return_state=True, return_sequences=True
)
output_shape = layer.compute_output_shape(
(None, timesteps, embedding_dim)
)
expected_output_shape = [
(None, timesteps, 6),
(None, 6),
(None, 6),
(None, 3),
(None, 3),
]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape], expected_output_shape
)
def test_stacked_rnn_with_training_param(self):
# See https://github.com/tensorflow/tensorflow/issues/32586
class CellWrapper(keras.layers.AbstractRNNCell):
def __init__(self, cell):
super().__init__()
self.cell = cell
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def build(self, input_shape):
self.cell.build(input_shape)
self.built = True
def get_initial_state(
self, inputs=None, batch_size=None, dtype=None
):
return self.cell.get_initial_state(
inputs=inputs, batch_size=batch_size, dtype=dtype
)
def call(self, inputs, states, training=None, **kwargs):
assert training is not None
return self.cell(inputs, states=states, training=training)
cell = keras.layers.LSTMCell(32)
cell = CellWrapper(cell)
cell = keras.layers.StackedRNNCells([cell])
rnn = keras.layers.RNN(cell)
inputs = np.ones((8, 4, 16), dtype=np.float32)
rnn(inputs, training=True)
def test_stacked_rnn_with_nested_cell(self):
batch = 10
t = 5
i1, i2, i3 = 3, 4, 5
o11, o12, o13 = 2, 3, 4
o21, o22, o23 = 4, 5, 6
# test 1: use_tuple=False
cells = [NestedCell(o11, o12, o13), NestedCell(o21, o22, o23)]
rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
output1, output2, state1, state2 = rnn((input_1, input_2))
s11, s12 = state1
s21, s22 = state2
self.assertEqual(output1.shape.as_list(), [None, t, o21])
self.assertEqual(output2.shape.as_list(), [None, t, o22, o23])
self.assertEqual(s11.shape.as_list(), [None, o11])
self.assertEqual(s12.shape.as_list(), [None, o12, o13])
self.assertEqual(s21.shape.as_list(), [None, o21])
self.assertEqual(s22.shape.as_list(), [None, o22, o23])
model = keras.models.Model([input_1, input_2], [output1, output2])
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
[np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))],
)
self.assertEqual(
model.output_shape, [(None, t, o21), (None, t, o22, o23)]
)
# test 2: use_tuple=True
cells = [
NestedCell(o11, o12, o13, use_tuple=True),
NestedCell(o21, o22, o23),
]
rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
output1, output2, state1, state2 = rnn(
NestedInput(t1=input_1, t2=input_2)
)
s11, s12 = state1
s21, s22 = state2
self.assertEqual(output1.shape.as_list(), [None, t, o21])
self.assertEqual(output2.shape.as_list(), [None, t, o22, o23])
self.assertEqual(s11.shape.as_list(), [None, o11])
self.assertEqual(s12.shape.as_list(), [None, o12, o13])
self.assertEqual(s21.shape.as_list(), [None, o21])
self.assertEqual(s22.shape.as_list(), [None, o22, o23])
model = keras.models.Model([input_1, input_2], [output1, output2])
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
[np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))],
)
self.assertEqual(
model.output_shape, [(None, t, o21), (None, t, o22, o23)]
)
def test_trackable_dependencies(self):
rnn = keras.layers.SimpleRNN
x = np.random.random((2, 2, 2))
y = np.random.random((2, 2))
model = keras.models.Sequential()
model.add(rnn(2))
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = {
id(o) for o in trackable_util.list_objects(model)
}
for v in model.variables:
self.assertIn(id(v), checkpointed_objects)
def test_high_dimension_RNN(self):
# Basic test case.
unit_a = 10
unit_b = 20
input_a = 5
input_b = 10
batch = 32
time_step = 4
cell = Minimal2DRNNCell(unit_a, unit_b)
x = keras.Input((None, input_a, input_b))
layer = keras.layers.RNN(cell)
y = layer(x)
self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b])
if not tf.executing_eagerly():
init_state = layer.get_initial_state(x)
self.assertEqual(len(init_state), 1)
self.assertEqual(
init_state[0].shape.as_list(), [None, unit_a, unit_b]
)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b)),
)
self.assertEqual(model.output_shape, (None, unit_a, unit_b))
# Test stacking.
cells = [
Minimal2DRNNCell(unit_a, unit_b),
Minimal2DRNNCell(unit_a * 2, unit_b * 2),
Minimal2DRNNCell(unit_a * 4, unit_b * 4),
]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a * 4, unit_b * 4)),
)
self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))
def test_high_dimension_RNN_with_init_state(self):
unit_a = 10
unit_b = 20
input_a = 5
input_b = 10
batch = 32
time_step = 4
# Basic test case.
cell = Minimal2DRNNCell(unit_a, unit_b)
x = keras.Input((None, input_a, input_b))
s = keras.Input((unit_a, unit_b))
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s)
model = keras.models.Model([x, s], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b)),
],
            np.zeros((batch, unit_a, unit_b)),
        )
import numpy
import random
from glob import glob
from scipy import interpolate
from scipy.special import softmax
from scipy.stats import ttest_ind
from sklearn.model_selection import KFold
import sys
from scipy.stats import skew, kurtosis
import itertools
import collections
import errno
import os.path as osp
import pickle
import time
import shutil
from itertools import count
from sklearn.metrics import confusion_matrix, f1_score, precision_score, roc_auc_score, recall_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score, classification_report, cohen_kappa_score, roc_curve, precision_recall_curve
from typing import List
from datetime import datetime
import sklearn.metrics as metrics
from mlxtend.plotting import plot_confusion_matrix as mlxtend_plot_confusion_matrix
from mlxtend.evaluate import confusion_matrix as mlxtend_confusion_matrix
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from inspect import signature
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from pathlib import Path
def get_project_root() -> Path:
return Path(__file__).parent.parent
def one_hot_array(label_array: np.array, total_classes):
    assert len(label_array.shape) == 1, "label_array must be 1D array"
    tmp = np.zeros(shape=(label_array.shape[0], total_classes), dtype=float)
tmp[np.arange(label_array.size), label_array] = 1.0
return tmp
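# Illustrative usage: a length-4 label vector over 3 classes becomes a (4, 3)
# one-hot matrix with exactly one 1.0 per row.
def _example_one_hot_array():
    labels = np.array([0, 2, 1, 2])
    encoded = one_hot_array(labels, total_classes=3)
    # encoded[1] == [0., 0., 1.] because labels[1] == 2
    return encoded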
def load_tf_model(model_path=''):
import tensorflow as tf
with tf.Session() as sess:
loaded_saver = tf.train.import_meta_graph(model_path)
loaded_saver.restore(sess, tf.train.latest_checkpoint('/'))
print(sess.run('w1:0'))
return sess
def get_all_folders_include_sub(path):
folders = [x[0] for x in os.walk(path)]
return folders
def get_char_split_symbol():
if sys.platform == "win32":
sp = "\\"
else:
sp = "/"
return sp
def get_all_files_include_sub(path, file_type):
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if file_type in file[-len(file_type):]:
files.append(os.path.join(os.path.abspath(r), file))
return files
def plot_train_history(history, title):
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title(title)
plt.legend()
plt.show()
def standardize_df_given_feature(df, features=[], scaler=None, df_name="", simple_method=True):
    assert len(features) > 0, "feature list must not be empty"
scaler_dic = {}
# check if the df contains nan or inf
if simple_method:
print("pre-processing dataset frame using simple method")
df[features] = df[features].replace([np.inf, -np.inf], np.nan)
df[features] = df[features].fillna(df[features].mean())
# df[~np.isfinite(df)] = np.nan
nan = df[df.isnull().any(axis=1)]
if nan.shape[0] > 0:
print("df contains nan")
inf = df[df.eq(np.inf).any(axis=1)]
if inf.shape[0] > 0:
print("df contains inf")
else:
print("pre-processing dataset frame using comprehensive method")
for feature in features:
# print("quality check on %s for column name: % s" % (df_name, feature))
if df[feature].isnull().values.any():
df[feature] = df[feature].replace(np.nan,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].mean())
if df[feature].isin([np.inf]).values.any():
df[feature] = df[feature].replace(np.inf,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].max())
if df[feature].isin([-np.inf]).values.any():
df[feature] = df[feature].replace(-np.inf,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].min())
df[feature] = df[feature].replace([np.nan, np.inf, -np.inf], 0.0)
if scaler is None:
scaler = StandardScaler()
        print('No scaler was given; fitting a new scaler now!')
        scaler.fit(df[features])
    print('start transforming data frame: %s' % df_name)
df[features] = scaler.transform(df[features])
return scaler
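# Illustrative sketch: fit a scaler on a training frame, then reuse it on a
# test frame so both share the training statistics. The column names below are
# hypothetical.
def _example_standardize_train_test():
    train_df = pd.DataFrame({'hr': [60.0, 72.0, np.inf], 'act': [0.0, 5.0, np.nan]})
    test_df = pd.DataFrame({'hr': [66.0, 80.0], 'act': [1.0, 2.0]})
    scaler = standardize_df_given_feature(train_df, features=['hr', 'act'], df_name='train')
    standardize_df_given_feature(test_df, features=['hr', 'act'], scaler=scaler, df_name='test')
    return train_df, test_df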
def extract_x_y_new(df, seq_len, mesaid, label_posi='mid', feature=""):
df_x = df[df["mesaid"] == mesaid][[feature, "stages"]].copy()
y = df_x["stages"].astype(int).values # get the ground truth for y
del df_x["stages"]
if label_posi == 'mid':
        if seq_len % 2 == 0:  # even win_len
            fw_end = int(np.ceil(seq_len / 2))
            bw_end = int(np.floor(seq_len / 2))
        else:
            fw_end = int(np.round(seq_len / 2))
            bw_end = int(np.round(seq_len / 2))
for s in range(1, fw_end):
df_x["shift_%d" % s] = df_x[feature].shift(s)
# as half of the sliding window has reversed order (these df columns)
columns = df_x.columns.tolist()
columns = columns[::-1] # or data_frame = data_frame.sort_index(ascending=True, axis=0)
df_x = df_x[columns]
for s in range(1, bw_end):
df_x["shift_-%d" % s] = df_x[feature].shift(-s)
else:
for s in range(1, seq_len):
df_x["shift_%d" % s] = df_x["activity"].shift(s)
x = df_x.fillna(-1).values
return x, y
def extract_x_y(df, seq_len, pid, label_posi='mid', feature="", id_col_name="mesaid", gt_col_name="stages"):
df_x = df[df[id_col_name] == pid][[feature, gt_col_name]].copy()
y = df_x[gt_col_name].astype(int).values # get the ground truth for y
del df_x[gt_col_name]
if label_posi == 'mid':
for s in range(1, round(seq_len / 2) + 1):
df_x["shift_%d" % s] = df_x[feature].shift(s)
# reverse columns
columns = df_x.columns.tolist()
columns = columns[::-1] # or data_frame = data_frame.sort_index(ascending=True, axis=0)
df_x = df_x[columns]
for s in range(1, round(seq_len / 2) + 1):
df_x["shift_-%d" % s] = df_x[feature].shift(-s)
else:
for s in range(1, seq_len + 1):
df_x["shift_%d" % s] = df_x["activity"].shift(s)
x = df_x.fillna(-1).values
return x, y
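# Worked sketch of the centred sliding window in extract_x_y: with seq_len=4,
# each row of x holds the value at epoch t together with its two past and two
# future neighbours (edges padded with -1), and y is the stage label of the
# centre epoch. The tiny frame below uses the MESA default column names.
def _example_extract_x_y():
    df = pd.DataFrame({
        'mesaid': [1] * 6,
        'activity': [10, 20, 30, 40, 50, 60],
        'stages': [0, 0, 1, 1, 2, 2],
    })
    x, y = extract_x_y(df, seq_len=4, pid=1, feature='activity')
    return x, y  # x has shape (6, 5): the centre value plus two shifts each way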
def get_data(df, seq_len, feature_list, pid_col_name='mesaid', gt_col_name="stages"):
# build dataset by participant ID, extract dataset using sliding window method.
final_x = []
# loop all mesa_ids
for feature in tqdm(feature_list):
pids = df[pid_col_name].unique()
x, y = extract_x_y(df, seq_len, pids[0], label_posi='mid', feature=feature, id_col_name=pid_col_name,
gt_col_name=gt_col_name)
if len(pids) > 1:
for mid in pids[1:]:
x_tmp, y_tmp = extract_x_y(df, seq_len, mid, label_posi='mid', feature=feature,
id_col_name=pid_col_name,
gt_col_name=gt_col_name)
x = np.concatenate((x, x_tmp))
y = np.concatenate((y, y_tmp))
x = np.expand_dims(x, -1)
final_x.append(x)
combined_x = np.concatenate(final_x, axis=-1)
return combined_x, y
def standardize_features_to_array(df, scalers=None):
"""
    This function scales the dataset using a scikit-learn scaler. We recommend not passing a feature list
    to the function, as it can be difficult to save the scaler list into an H5py file.
    # fixme: need to complete the code for the feature list; need to return a scaler trained on the training
    # fixme: dataset so it can be reused for the test dataset.
    :param df:
    :param scalers:
    :return:
"""
df = df.apply(lambda x: x.replace([np.nan], x[~x.isin([np.nan, np.inf, -np.inf])].mean()), axis=0)
df = df.apply(lambda x: x.replace([np.inf], x[~x.isin([np.nan, np.inf, -np.inf])].max()), axis=0)
df = df.apply(lambda x: x.replace([-np.inf], x[~x.isin([np.nan, np.inf, -np.inf])].min()), axis=0)
df = df.apply(lambda x: x.replace([np.nan, np.inf, -np.inf], 0.0), axis=0)
if scalers is not None:
df = scalers.transform(df)
else:
scaler = StandardScaler()
scaler.fit(df)
df = scaler.transform(df)
# the final check to replace any abnormal values
return df, scaler
def load_scaler(path, file_type=".pkl"):
scaler = None
if file_type == ".pkl":
with open(path, "rb") as f:
scaler = pickle.load(f)
return scaler
def load_h5_df_train_test_dataset(path):
""" this is only for the mesa dataset!!!!!"""
store = pd.HDFStore(path, 'r')
dftrain = store["train"]
dftest = store["test"]
feature_name = store["featnames"].values.tolist()
if type(feature_name[0]) is list:
feature_name = list(itertools.chain.from_iterable(feature_name))
store.close()
return dftrain, dftest, feature_name
def get_csv_files(data_path):
# Remove non-mat files, and perform ascending sort
print("searching csv files ...")
allfiles = os.listdir(data_path)
csv_files = []
for idx, f in enumerate(allfiles):
if ".csv" in f:
csv_files.append(os.path.join(data_path, f))
print("total found {} files".format(len(csv_files)))
csv_files.sort()
return csv_files
# TODO add argument that add the modality name in column name
def get_statistic_feature(df, column_name, windows_size=20):
"""
    Note: this function mutates the input data frame in place, so the argument is not treated as immutable.
    :param df:
    :param column_name: the column name whose statistical features we want to extract.
    :param windows_size:
    :return: feature_names: the feature names extracted for each window size.
"""
feature_names = []
for win_size in np.arange(1, windows_size):
df["_mean_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).mean().fillna(0.0)
df["_mean_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).mean().fillna(0.0)
df["_median_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).median().fillna(0.0)
df["_median_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).median().fillna(0.0)
df["_std_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).std().fillna(
0.0)
df["_std_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).std().fillna(0.0)
df["_max_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).max().fillna(
0.0)
df["_max_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).max().fillna(0.0)
df["_min_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).min().fillna(
0.0)
df["_min_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).min().fillna(0.0)
df["_var_%d" % win_size] = df[column_name].rolling(window=win_size, center=False, min_periods=1).var().fillna(
0.0)
df["_var_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).var().fillna(0.0)
df["_nat_%d" % win_size] = ((df[column_name] >= 50) & (df[column_name] < 100)).rolling(window=win_size,
center=False,
min_periods=1).sum().fillna(
0.0)
df["_nat_centered_%d" % win_size] = ((df[column_name] >= 50) & (df[column_name] < 100)).rolling(window=win_size,
center=True,
min_periods=1).sum().fillna(
0.0)
df["_anyact_%d" % win_size] = (df[column_name] > 0).rolling(window=win_size, center=False,
min_periods=1).sum().fillna(0.0)
df["_anyact_centered_%d" % win_size] = (df[column_name] > 0).rolling(window=win_size, center=True,
min_periods=1).sum().fillna(0.0)
if win_size > 3:
df["_skew_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).skew().fillna(0.0)
df["_skew_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).skew().fillna(0.0)
#
df["_kurt_%d" % win_size] = df[column_name].rolling(window=win_size, center=False,
min_periods=1).kurt().fillna(0.0)
df["_kurt_centered_%d" % win_size] = df[column_name].rolling(window=win_size, center=True,
min_periods=1).kurt().fillna(0.0)
        # build up the feature name list for this window size
for variant in ["centered_", ""]:
feature_names.append("_mean_%s%d" % (variant, win_size))
feature_names.append("_median_%s%d" % (variant, win_size))
feature_names.append("_max_%s%d" % (variant, win_size))
feature_names.append("_min_%s%d" % (variant, win_size))
feature_names.append("_std_%s%d" % (variant, win_size))
feature_names.append("_var_%s%d" % (variant, win_size))
feature_names.append("_nat_%s%d" % (variant, win_size))
feature_names.append("_anyact_%s%d" % (variant, win_size))
if win_size > 3:
feature_names.append("_skew_%s%d" % (variant, win_size))
feature_names.append("_kurt_%s%d" % (variant, win_size))
df["_Act"] = (df[column_name]).fillna(0.0)
df["_LocAct"] = (df[column_name] + 1.).apply(np.log).fillna(0.0) # build up the n log transformation
feature_names.append("_LocAct") # add logarithm transformation
feature_names.append("_Act")
return feature_names
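# Illustrative sketch: extract rolling statistics over a hypothetical activity
# column. The call mutates `df` in place and returns the list of newly added
# feature column names.
def _example_statistic_features():
    df = pd.DataFrame({'activity': np.random.randint(0, 200, size=100)})
    feature_names = get_statistic_feature(df, column_name='activity', windows_size=5)
    return df[feature_names]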
def get_hr_statistic_feature(heart_rate_values):
"""
    :param heart_rate_values: a window containing a time series of heart rate values
"""
heart_rate_values = np.asarray(heart_rate_values)
    min_hr = np.min(heart_rate_values)
max_hr = np.max(heart_rate_values)
mean_hr = np.mean(heart_rate_values)
skw_hr = skew(heart_rate_values)
kurt_hr = kurtosis(heart_rate_values)
std_hr = np.std(heart_rate_values)
return {"min_hr": min_hr, "max_hr": max_hr, "mean_hr": mean_hr, "skw_hr": skw_hr, "kurt_hr": kurt_hr,
"std_hr": std_hr}
def load_results(folder, num_classes, modality, feature_type, hrv_win_len):
"""
Load results from machine learning based methods and combine with deep learning model based results
"""
MLRESULTS = os.path.join(folder, "%d_stages_%ds_ml_%s.csv" % (num_classes, hrv_win_len, modality))
dfml = pd.read_csv(MLRESULTS)
dfnn = get_nns(folder, num_classes, modality, feature_type, hrv_win_len)
dfml = dfml.rename(columns={"Unnamed: 0": "algs"})
dfnn = dfnn.rename(columns={"actValue": "activity"})
merged = pd.merge(dfml, dfnn, on=["mesaid", "linetime", "activity", "stages", "gt_sleep_block"])
assert len(merged.stages.unique()) == num_classes
for cl in ['activity_y', 'stages_y', 'gt_sleep_block_y']:
if cl in merged.columns:
del merged[cl]
merged["always_0"] = 0
merged["always_1"] = 1
merged["always_2"] = 2
merged["always_3"] = 3
merged["always_4"] = 4
# merged["sleep"] = (~merged["wake"].astype(np.bool)).astype(float)
return merged
def pvalue(results, alg1, alg2, metric):
"""
    get the two-sample t-test p-value comparing two algorithms on a given metric
:param results:
:param alg1:
:param alg2:
:param metric:
:return:
"""
return ttest_ind(results[alg1][metric], results[alg2][metric])[1]
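# Illustrative sketch: `results` is assumed to be a nested mapping of
# algorithm name -> metric name -> list of per-subject scores, so the t-test
# compares the two algorithms' score distributions. The numbers are made up.
def _example_pvalue():
    results = {
        'cnn': {'accuracy': [0.81, 0.79, 0.84, 0.80]},
        'lstm': {'accuracy': [0.85, 0.88, 0.83, 0.86]},
    }
    return pvalue(results, 'cnn', 'lstm', 'accuracy')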
def make_one_block(source_df, start_idx, end_idx):
# create a new df from the source df index and fill zeros
result = pd.Series(data=0, index=source_df.index)
# set a block in dataset frame with value 1
result.loc[start_idx:end_idx] = 1
return result
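# Illustrative usage: mark index labels 2..4 (inclusive, since .loc slicing is
# label-based) as the ground-truth sleep block, zeros elsewhere.
def _example_make_one_block():
    df = pd.DataFrame({'activity': range(6)})
    return make_one_block(df, start_idx=2, end_idx=4)  # values: 0, 0, 1, 1, 1, 0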
def get_files_given_type(data_path, file_type):
"""
    this function will return all file names with the given postfix
:param data_path:
:param file_type:
:return:
"""
print("searching csv files ...")
allfiles = os.listdir(data_path)
files = []
for idx, f in enumerate(allfiles):
if file_type in f:
files.append(os.path.basename(f))
print("total found {} files".format(len(files)))
files.sort()
return files
def plot_multiple_classifier_roc(files_path=None):
"""
    generate a diagram containing the ROC curves of multiple classifiers to compare their performance
:param files_path:
:return:
"""
files = get_files_given_type(files_path, file_type='npz')
# plot roc curve
plt.figure(0).clf()
for npz_file in files:
        with np.load(os.path.join(files_path, npz_file)) as data:  # files holds basenames, so rejoin with the folder
            label = str(data['experiment'])  # stored as a 0-d array; convert for string concatenation below
            y_true = data['y_true']
            y_pred = data['y_pred']
fpr, tpr, thresh = roc_curve(y_true, y_pred)
auc = roc_auc_score(y_true, y_pred)
plt.plot(fpr, tpr, label=label + " auc=%0.2f" % auc)
plt.plot([0, 1], [0, 1], color='red', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.legend(loc=0)
def save_validation_logits(y_true, y_pred, classifier_name=None, file_path=None, ):
    if file_path is not None:
save_dict = {"experiment": classifier_name, 'y_true': y_true, 'y_pred': y_pred}
np.savez(file_path, **save_dict)
# TODO: check whether the target directory exists first, and create it if it does not
def log_print_inference(y_test, yhat, label_value, target_names, epochs=0, tensor_board_path='', file_title="",
                        args=None):
"""
    Log inference results to the tensorboard path so each experiment's predictions can be tracked, including accuracy,
    recall, precision, F1 score, the classification report, and the confusion matrix (as text and as a picture).
TODO: need add specificity, sensitivity, PPV, also we need log the args
TODO we need two levels performance evaluation. Classifier level and label level
=== Confusion Matrix ===
a b c d e f g <-- classified as
50 15 3 0 0 1 1 | a = build wind float
16 47 6 0 2 3 2 | b = build wind non-float
5 5 6 0 0 1 0 | c = vehic wind float
0 0 0 0 0 0 0 | d = vehic wind non-float
0 2 0 0 10 0 1 | e = containers
1 1 0 0 0 7 0 | f = tableware
3 2 0 0 0 1 23 | g = headlamps
=== Detailed Accuracy By Class ===
TP Rate FP Rate Precision Recall F-Measure MCC ROC Area PRC Area Class
0.714 0.174 0.667 0.714 0.690 0.532 0.806 0.667 build wind float
0.618 0.181 0.653 0.618 0.635 0.443 0.768 0.606 build wind non-float
0.353 0.046 0.400 0.353 0.375 0.325 0.766 0.251 vehic wind float
0.000 0.000 0.000 0.000 0.000 0.000 ? ? vehic wind non-float
0.769 0.010 0.833 0.769 0.800 0.788 0.872 0.575 containers
0.778 0.029 0.538 0.778 0.636 0.629 0.930 0.527 tableware
0.793 0.022 0.852 0.793 0.821 0.795 0.869 0.738 headlamps
0.668 0.130 0.670 0.668 0.668 0.539 0.807 0.611 Weighted Avg.
:param args:
:param file_title:
:param y_test:
:param yhat:
:param label_value:
:param target_names:
:param epochs:
:param tensor_board_path:
:return:
"""
if args is not None:
write_arguments_to_file(args, os.path.join(tensor_board_path, file_title + "_args.csv"))
if len(y_test.shape) > 2:
y_test = np.reshape(y_test, -1)
accuracy = accuracy_score(y_test, yhat)
print('Accuracy: %f' % accuracy)
precision = precision_score(y_test, yhat, average='macro')
print('Precision: %f' % precision)
recall = recall_score(y_test, yhat, average='macro')
print('Recall: %f' % recall)
f1_result = f1_score(y_test, yhat, average='macro')
print('F1 score: %f' % f1_result)
    matrix = confusion_matrix(y_test, yhat, labels=label_value)
report = classification_report(y_test, yhat, target_names=target_names, digits=4)
print("Classification report: \n")
print(report)
    to_json = {'epoch_num': [epochs], 'accuracy': [accuracy], 'precision_macro': [precision], 'recall': [recall],
               'f1_result': [f1_result]}
result = pd.DataFrame.from_dict(to_json)
result.to_csv(os.path.join(tensor_board_path, file_title + "metrics_summary.csv"), index=False)
np.savetxt(os.path.join(tensor_board_path, file_title + 'confusion_matrix.txt'), matrix, fmt='%d', delimiter=',')
with open(os.path.join(tensor_board_path, file_title + "classification_report.txt"), "w") as text_file:
text_file.write(report)
# for binary classification we produce the ROC curve
if len(target_names) == 2:
ratio = sum(y_test) / len(y_test)
print("The ratio between negative and positive case are {}".format(str(ratio)))
# save the best trained model as well.
normal_path = plot_save_confusion_matrix(y_test, yhat, normalize=True, class_names=target_names,
location=tensor_board_path, title=file_title)
return [normal_path]
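# Hedged usage sketch for log_print_inference on a tiny 3-class example; the
# output directory "." and file_title are illustrative assumptions, and the
# labels/names come from sleep_class_name_mapping defined below in this module.
def _demo_log_inference():
    y_test = np.array([0, 1, 2, 1, 0, 2])
    yhat = np.array([0, 1, 1, 1, 0, 2])
    label_values, target_names = sleep_class_name_mapping(3)
    return log_print_inference(y_test, yhat, label_value=label_values,
                               target_names=target_names,
                               tensor_board_path=".", file_title="demo_")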
def log_print_metrics(y_pred, y_test, epochs, num_classes, note, tensorboard_path, args):
if len(y_pred.shape) > 1:
yhat_classes = np.argmax(y_pred, axis=-1)
else:
yhat_classes = y_pred
# Y_test_classes = np.reshape(y_test, (-1, 2))
if len(y_test.shape) > 1:
Y_test_classes = np.argmax(y_test, axis=-1)
else:
Y_test_classes = y_test
label_values, target_names = sleep_class_name_mapping(num_classes)
log_print_inference(Y_test_classes, yhat_classes, label_value=label_values, target_names=target_names,
epochs=epochs, tensor_board_path=tensorboard_path, file_title="dl_exp_%s" % note, args=args)
def sleep_class_name_mapping(num_classes):
if num_classes == 5:
label_values = [0, 1, 2, 3, 4]
target_names = ['Wake', 'N1', 'N2', 'N3', 'REM']
elif num_classes == 4:
label_values = [0, 1, 2, 3]
target_names = ['Wake', 'Light', 'Deep', 'REM']
elif num_classes == 3:
label_values = [0, 1, 2]
target_names = ['Wake', 'NREM', 'REM']
else:
label_values = [0, 1]
target_names = ['Wake', 'Sleep']
return label_values, target_names
def plot_pr_re_curve(y_true, y_prob, save_path=None):
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
precision, recall, _ = precision_recall_curve(y_true, y_prob)
average_precision = average_precision_score(y_true, y_prob)
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision))
def plot_roc_curve(y_true, y_prob, save_path=None):
    if len(np.unique(y_true)) < 2:  # ROC is undefined when only one class is present
        return ''
fpr, tpr, thresholds = roc_curve(y_true, y_prob)
auc = roc_auc_score(y_true, y_prob)
####################################
    # The optimal cut-off is where tpr is high and fpr is low,
    # i.e. where tpr - (1 - fpr) is zero or closest to zero
####################################
i = np.arange(len(tpr)) # index for df
roc = pd.DataFrame(
{'fpr': pd.Series(fpr, index=i), 'tpr': pd.Series(tpr, index=i), '1-fpr': pd.Series(1 - fpr, index=i),
'tf': pd.Series(tpr - (1 - fpr), index=i), 'thresholds': pd.Series(thresholds, index=i)})
roc_t = roc.loc[(roc.tf - 0).abs().argsort()[:1]]
if auc > 0.0:
# when we plot we have to make sure the x and y values are given
plt.plot(fpr, tpr, color='orange', label='ROC curve (AUC = %0.2f)' % auc)
else:
plt.plot(fpr, tpr, color='orange', label='ROC')
    plt.plot(1 - fpr, tpr, color='red', label='1 - fpr, opt cut point = %0.2f' % float(roc_t['thresholds'].iloc[0]))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.show()
    if save_path:  # the default is None, so guard before joining paths
_save_path = os.path.join(save_path, "ROC_Curve.png")
plt.savefig(_save_path)
plt.clf()
return _save_path
return ''
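# Worked sketch of the cut-off rule documented above: pick the threshold where
# tpr - (1 - fpr) is closest to zero, i.e. sensitivity ~= specificity. The
# synthetic scores are illustrative; roc_curve is the same sklearn import used
# elsewhere in this module.
def _demo_optimal_cutoff():
    y_true = np.array([0, 0, 0, 1, 1, 1, 0, 1])
    y_prob = np.array([0.1, 0.3, 0.4, 0.6, 0.8, 0.9, 0.5, 0.7])
    fpr, tpr, thresholds = roc_curve(y_true, y_prob)
    opt_idx = np.argmin(np.abs(tpr - (1 - fpr)))
    return thresholds[opt_idx]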
def plot_roc_curve2(fpr, tpr, thresholds):
plt.figure()
plt.plot(fpr, tpr, color='darkorange', label='ROC curve (area = %0.2f)' % metrics.auc(fpr, tpr))
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
# create the axis of thresholds (scores)
ax2 = plt.gca().twinx()
ax2.plot(fpr, thresholds, markeredgecolor='r', linestyle='dashed', color='r')
ax2.set_ylabel('Threshold', color='r')
ax2.set_ylim([thresholds[-1], thresholds[0]])
ax2.set_xlim([fpr[0], fpr[-1]])
plt.savefig('roc_and_threshold.png')
plt.clf()
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(threshold,
dist[test_set],
actual_issame[
test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
    fpr = np.mean(fprs, 0)
    return tpr, fpr, accuracy
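# Hedged usage sketch for calculate_roc with synthetic embeddings. It assumes
# the calculate_accuracy helper used above is available; the threshold grid and
# pair construction below are illustrative assumptions only.
def _demo_calculate_roc():
    rng = np.random.RandomState(0)
    emb1 = rng.randn(100, 128)
    emb2 = emb1 + 0.1 * rng.randn(100, 128)  # first 50 pairs: same identity (small perturbation)
    emb2[50:] = rng.randn(50, 128)           # last 50 pairs: different identity
    actual_issame = np.array([True] * 50 + [False] * 50)
    thresholds = np.arange(0, 4, 0.01)
    return calculate_roc(thresholds, emb1, emb2, actual_issame, nrof_folds=5)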
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Tests for task decoding'''
import numpy as np
import pytest
import jams
import pumpp
# Sampling rate and hop are simple here to keep things
# divisible for inverse checks
@pytest.fixture()
def sr():
return 10
@pytest.fixture()
def hop_length():
return 1
@pytest.fixture()
def ann_tag():
ann = jams.Annotation(namespace='tag_gtzan', duration=10)
ann.append(time=0, duration=5, value='blues')
ann.append(time=1.5, duration=1.5, value='reggae')
return ann
@pytest.fixture()
def ann_vector():
ann = jams.Annotation(namespace='vector', duration=1)
ann.append(time=0, duration=0, value=np.arange(32))
return ann
@pytest.fixture()
def ann_beat():
ann = jams.Annotation(namespace='beat', duration=10)
# for n, i in enumerate(np.arange(0, 10, 0.5)):
# ann.append(time=i, duration=0, value=1 + (n % 4))
# Make up two measures of 4/4, plus two pickup beats
for t, v in [(0, -2), (0.5, -1),
(1, 1), (1.5, 2), (2, 3), (3, 4),
(3.5, 1), (4, 2), (4.5, 3), (5, 4),
(5.5, 1), (6, 2), (6.5, 3), (7, 4)]:
ann.append(time=t, duration=0, value=v)
return ann
@pytest.fixture()
def ann_chord():
ann = jams.Annotation(namespace='chord', duration=5)
for t, c in [(0, 'C'),
(1, 'C:maj'),
(2, 'D:min/3'),
(3, 'F#:7(*5)'),
(4, 'G:sus2')]:
ann.append(time=t, duration=1, value=c)
return ann
@pytest.fixture(params=[None, 0.5])
def p_self_chord(request):
return request.param
@pytest.fixture(params=[False, True])
def p_init_chord(request):
if request.param:
return np.ones(170) / 170
else:
return None
@pytest.fixture(params=[False, True])
def p_state_chord(request):
if request.param:
return np.ones(170) / 170
else:
return None
@pytest.fixture(params=[None, False, True])
def p_self_tags(request):
if request.param is None:
return None
if request.param:
return 0.5 * np.ones(10) # 10 tags in GTZAN
else:
return 0.5
@pytest.fixture(params=[False, True])
def p_init_tags(request):
if request.param:
return 0.5 * np.ones(10)
else:
return None
@pytest.fixture(params=[False, True])
def p_state_tags(request):
if request.param:
return 0.5 * np.ones(10)
else:
return None
@pytest.fixture(params=[None, False, True])
def p_self_beat(request):
if request.param is None:
return None
elif request.param:
return np.asarray([0.5, 0.0])
else:
return 0.5
@pytest.fixture(params=[None, False, True])
def p_self_down(request):
if request.param is None:
return None
elif request.param:
return np.asarray([0.5, 0.0])
else:
return 0.5
@pytest.fixture(params=[None, 0.5])
def p_init_beat(request):
return request.param
@pytest.fixture(params=[None, 0.5])
def p_init_down(request):
return request.param
@pytest.fixture(params=[None, 0.5])
def p_state_beat(request):
return request.param
@pytest.fixture(params=[None, 0.5])
def p_state_down(request):
return request.param
@pytest.fixture()
def ann_segment():
ann = jams.Annotation(namespace='segment_open', duration=5)
for t, c in [(0, 'A'),
(1, 'B'),
(2, 'A'),
(3, 'B'),
(4, 'C')]:
ann.append(time=t, duration=1, value=c)
return ann
@pytest.fixture()
def ann_key():
ann = jams.Annotation(namespace='key_mode', duration=5)
for t, c in [(0, 'A:major'),
(1, 'Bb:lydian'),
(2, 'A:minor'),
(3, 'B:major'),
(4, 'C:dorian')]:
ann.append(time=t, duration=1, value=c)
return ann
@pytest.fixture(params=[None, 0.5])
def p_self_key(request):
return request.param
@pytest.fixture(params=[False, True])
def p_init_key(request):
if request.param:
return np.ones(109) / 109
else:
return None
@pytest.fixture(params=[False, True])
def p_state_key(request):
if request.param:
return np.ones(109) / 109
else:
return None
def test_decode_tags_dynamic_hard(sr, hop_length, ann_tag, p_self_tags, p_init_tags, p_state_tags):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.DynamicLabelTransformer('genre', 'tag_gtzan',
hop_length=hop_length,
sr=sr,
p_self=p_self_tags,
p_init=p_init_tags,
p_state=p_state_tags)
data = tc.transform_annotation(ann_tag, ann_tag.duration)
inverse = tc.inverse(data['tags'], duration=ann_tag.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_tag.duration)
assert np.allclose(data['tags'], data2['tags'])
def test_decode_tags_dynamic_soft(sr, hop_length, ann_tag, p_self_tags, p_init_tags, p_state_tags):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.DynamicLabelTransformer('genre', 'tag_gtzan',
hop_length=hop_length,
sr=sr,
p_self=p_self_tags,
p_init=p_init_tags,
p_state=p_state_tags)
data = tc.transform_annotation(ann_tag, ann_tag.duration)
# Soften the data, but preserve the decisions
    tags_predict = 0.9 * data['tags'] + 0.1 * np.ones_like(data['tags'])
    inverse = tc.inverse(tags_predict, duration=ann_tag.duration)
    for obs in inverse:
        assert 0. <= obs.confidence <= 1.
    data2 = tc.transform_annotation(inverse, ann_tag.duration)
    assert np.allclose(data['tags'], data2['tags'])
from pylab import *
import numpy as np
import operator
## Search-Routine Parameters
n = 2
mu_exp = 2.0
mu_rifl = 1.0
mu_contr_ex = 1.0/2
mu_contr_int = -1.0/2
mu_red = 1.0/2
## Random Trial Simplex [may need a careful range-definition]
def vertici_iniziali():
import random
random.seed()
vertici=[]
for i in range(n+1):
x=random.uniform(-12,-8);
y=random.uniform(0,3);
vertici.append([x,y])
return vertici
vertex=vertici_iniziali()
x1=vertex[0]
x2=vertex[1]
x3=vertex[2]
## Target Functions [uncomment the desired one]
def f(x,y):
#return -1.0*np.cos(x)*np.cos(y)*np.exp(-((x-np.pi)**2+(y-np.pi)**2)) # EASOM
#return np.exp(0.5*(x**2+y**2-25)**2)+(np.sin(4*x-3*y))**4+0.5*(2*x+y-10)**2 # GOLDSTEIN-PRICE
return 100*abs(y-0.01*x**2)+0.01*abs(x+10) # BUKIN 6th
#return (x+2*y-7)**2+(2*x+y-5)**2 # BOOTH
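# Quick sanity check (worked example): this Bukin N.6 variant has its global
# minimum f = 0 at (x, y) = (-10, 1), since y - 0.01*x**2 = 0 and x + 10 = 0
# there. Useful later for judging whether the simplex converged to the right spot.
assert abs(f(-10.0, 1.0)) < 1e-9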
data_x=[x1,x2,x3]
data_f=[f(x1[0],x1[1]),f(x2[0],x2[1]),f(x3[0],x3[1])]
data=[[f(x1[0],x1[1]),x1],[f(x2[0],x2[1]),x2],[f(x3[0],x3[1]),x3]]
print (data)
## Ordering [increasing f(x)]
data=sorted(data,key=operator.itemgetter(0))
data_f= [item[0] for item in data]
data_x= [item[1] for item in data]
print (data_f, 'f(trial simplex)')
print ( data_x ,'trial simplex')
## Plotting the Target Function and the Trial Simplex
xvec = np.linspace(-12, -8, 1000)
yvec = np.linspace(-0, 3, 1000)
X,Y = np.meshgrid(xvec, yvec)
Z = f(X, Y)  # rows index y, columns index x, matching imshow with origin='lower'
fig, ax = subplots()
im = imshow(Z, cmap=cm.magma, vmin=Z.min(), vmax=Z.max(), extent=[-12, -8, 0, 3], origin='lower')
im.set_interpolation('bilinear')
cb = fig.colorbar(im)
Xvertex = np.array([])
Yvertex = np.array([])
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='green', edgecolor='black', s=200)
coord = data_x
coord.append(coord[0]) # have to repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # creates lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--') # draw the simplex outline
## Evolving the Simplex
epsilon = 10**(-5) # convergence tolerance on the spread of f-values over the simplex
loop=1
while data_f[n] - data_f[0] > epsilon: # stop once the best and worst vertices agree to within epsilon
loop=loop+1
## Centroid
a=np.array(data_x[0:n])
a=a/n
xc=a.sum(axis=0)
xr=(1+mu_rifl)*xc-mu_rifl*np.array(data_x[n])
fr=f(xr[0],xr[1])
print ( fr, 'fr')
## Reflection step
if data_f[0]<=fr<data_f[n-1]:
data[n][1]=xr
data[n][0]=f(xr[0],xr[1])
data=sorted(data,key=operator.itemgetter(0))
        print('loop', loop, 'reflection')
        data_f = [item[0] for item in data]
        data_x = [item[1] for item in data]
        print(data_f, 'function values')
        print(data_x, 'vertices')
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='white', edgecolor='black')
coord = data_x
coord.append(coord[0]) # repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # create lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--')
continue
## Expansion step
if fr<data_f[0]:
a=np.array(data_x[0:n])
a=a/n
xc=a.sum(axis=0)
xe=(1+mu_exp)*xc-mu_exp*np.array(data_x[n])
fe=f(xe[0],xe[1])
if fe<fr:
data[n][1]=xe
data[n][0]=f(xe[0],xe[1])
data=sorted(data,key=operator.itemgetter(0))
            print('loop', loop, 'expansion')
            data_f = [item[0] for item in data]
            data_x = [item[1] for item in data]
            print(data_f, 'function values')
            print(data_x, 'vertices')
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='white', edgecolor='black')
coord = data_x
coord.append(coord[0]) # repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # create lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--')
continue
else:
data[n][1]=xr
data[n][0]=f(xr[0],xr[1])
data=sorted(data,key=operator.itemgetter(0))
            print('loop', loop, 'reflection')
            data_f = [item[0] for item in data]
            data_x = [item[1] for item in data]
            print(data_f, 'function values')
            print(data_x, 'vertices')
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='white', edgecolor='black')
coord = data_x
coord.append(coord[0]) # repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # create lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--')
continue
## External-Contraction step
if data_f[n-1]<=fr<data_f[n]:
a=np.array(data_x[0:n])
a=a/n
xc=a.sum(axis=0)
xoc=(1+mu_contr_ex)*xc-mu_contr_ex*np.array(data_x[n])
foc=f(xoc[0],xoc[1])
if foc<fr:
data[n][1]=xoc
data[n][0]=f(xoc[0],xoc[1])
data=sorted(data,key=operator.itemgetter(0))
            print('loop', loop, 'external contraction')
            data_f = [item[0] for item in data]
            data_x = [item[1] for item in data]
            print(data_f, 'function values')
            print(data_x, 'vertices')
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='white', edgecolor='black')
coord = data_x
coord.append(coord[0]) # repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # create lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--')
continue
## Reduction step [!]
else:
a=np.array(data_x)
for i in range(1,n+1):
data[i][1]=a[0]+mu_red*(a[i]-a[0])
data[i][0]=f(data[i][1][0],data[i][1][1])
data=sorted(data,key=operator.itemgetter(0))
            print('loop', loop, 'reduction')
            data_f = [item[0] for item in data]
            data_x = [item[1] for item in data]
            print(data_f, 'function values')
            print(data_x, 'vertices')
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='white', edgecolor='black')
coord = data_x
coord.append(coord[0]) # repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # create lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--')
continue
## Internal-Contraction step
if fr>=data_f[n]:
a=np.array(data_x[0:n])
a=a/n
xc=a.sum(axis=0)
xic=(1+mu_contr_int)*xc-mu_contr_int*np.array(data_x[n])
fic=f(xic[0],xic[1])
if fic<data_f[n]:
data[n][1]=xic
data[n][0]=f(xic[0],xic[1])
data=sorted(data,key=operator.itemgetter(0))
            print('loop', loop, 'internal contraction')
            data_f = [item[0] for item in data]
            data_x = [item[1] for item in data]
            print(data_f, 'function values')
            print(data_x, 'vertices')
for i in range(n+1):
Xvertex = np.append(Xvertex, data_x[i][0])
Yvertex = np.append(Yvertex, data_x[i][1])
plt.scatter(Xvertex,Yvertex, color='white', edgecolor='black')
coord = data_x
coord.append(coord[0]) # repeat the first point to create a 'closed loop'
xs, ys = zip(*coord) # create lists of x and y values
plt.plot(xs,ys, color='white', alpha=0.3, ls='--')
continue
## Reduction step [!]
else:
            a = np.array(data_x)
            for i in range(1, n+1):
                data[i][1] = a[0] + mu_red*(a[i]-a[0])
                data[i][0] = f(data[i][1][0], data[i][1][1])
            data = sorted(data, key=operator.itemgetter(0))
            print('loop', loop, 'reduction')
            data_f = [item[0] for item in data]
            data_x = [item[1] for item in data]
            continue